
AMBARI-5747 Remove facter-1.6.10, Ruby, Puppet dependencies from pom.xml and src (dsen)

Dmitry Sen 11 years ago
parent
commit
3d1171b06a
100 changed files with 1 addition and 7638 deletions
  1. + 0 - 7     LICENSE.txt
  2. + 0 - 6     ambari-agent/conf/unix/ambari-agent.ini
  3. + 1 - 96    ambari-agent/pom.xml
  4. + 0 - 3     ambari-agent/src/main/package/deb/control/postinst
  5. + 0 - 1     ambari-agent/src/main/package/rpm/postinstall.sh
  6. + 0 - 48    ambari-agent/src/main/puppet/manifestloader/site.pp
  7. + 0 - 68    ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp
  8. + 0 - 23    ambari-agent/src/main/puppet/modules/configgenerator/manifests/init.pp
  9. + 0 - 21    ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/dashboard/service_check.pp
  10. + 0 - 76   ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/init.pp
  11. + 0 - 28   ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/params.pp
  12. + 0 - 97   ambari-agent/src/main/puppet/modules/hdp-dashboard/templates/cluster_configuration.json.erb
  13. + 0 - 23   ambari-agent/src/main/puppet/modules/hdp-flume/files/flumeSmoke.sh
  14. + 0 - 25   ambari-agent/src/main/puppet/modules/hdp-flume/manifests/client.pp
  15. + 0 - 27   ambari-agent/src/main/puppet/modules/hdp-flume/manifests/init.pp
  16. + 0 - 24   ambari-agent/src/main/puppet/modules/hdp-flume/manifests/params.pp
  17. + 0 - 28   ambari-agent/src/main/puppet/modules/hdp-flume/manifests/service.pp
  18. + 0 - 24   ambari-agent/src/main/puppet/modules/hdp-flume/templates/flume-env.sh.erb
  19. + 0 - 59   ambari-agent/src/main/puppet/modules/hdp-flume/templates/log4j.properties.erb
  20. + 0 - 37   ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmetad.sh
  21. + 0 - 62   ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmond.sh
  22. + 0 - 34   ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkRrdcached.sh
  23. + 0 - 73   ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetad.init
  24. + 0 - 204  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetadLib.sh
  25. + 0 - 73   ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmond.init
  26. + 0 - 556  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmondLib.sh
  27. + 0 - 207  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrd.py
  28. + 0 - 47   ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrdcachedLib.sh
  29. + 0 - 141  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/setupGanglia.sh
  30. + 0 - 64   ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmetad.sh
  31. + 0 - 80   ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmond.sh
  32. + 0 - 69   ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startRrdcached.sh
  33. + 0 - 43   ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmetad.sh
  34. + 0 - 55   ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh
  35. + 0 - 41   ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopRrdcached.sh
  36. + 0 - 28   ambari-agent/src/main/puppet/modules/hdp-ganglia/files/teardownGanglia.sh
  37. + 0 - 79   ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config.pp
  38. + 0 - 43   ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_daemon.pp
  39. + 0 - 36   ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmetad/service_check.pp
  40. + 0 - 36   ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmond/service_check.pp
  41. + 0 - 53   ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/init.pp
  42. + 0 - 165  ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp
  43. + 0 - 79   ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor_and_server.pp
  44. + 0 - 35   ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/params.pp
  45. + 0 - 259  ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp
  46. + 0 - 43   ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaClusters.conf.erb
  47. + 0 - 24   ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaEnv.sh.erb
  48. + 0 - 62   ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaLib.sh.erb
  49. + 0 - 62   ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkForFormat.sh
  50. + 0 - 53   ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkWebUI.py
  51. + 0 - 132  ambari-agent/src/main/puppet/modules/hdp-hadoop/files/task-log4j.properties
  52. + 0 - 65   ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb
  53. + 0 - 47   ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_namenode_id.rb
  54. + 0 - 51   ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb
  55. + 0 - 56   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp
  56. + 0 - 100  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp
  57. + 0 - 36   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs.pp
  58. + 0 - 26   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs_service_check.pp
  59. + 0 - 84   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp
  60. + 0 - 49   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp
  61. + 0 - 121  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp
  62. + 0 - 42   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp
  63. + 0 - 170  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp
  64. + 0 - 547  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
  65. + 0 - 96   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp
  66. + 0 - 29   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp
  67. + 0 - 60   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/journalnode.pp
  68. + 0 - 75   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp
  69. + 0 - 285  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp
  70. + 0 - 61   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp
  71. + 0 - 28   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp
  72. + 0 - 44   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp
  73. + 0 - 222  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
  74. + 0 - 132  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp
  75. + 0 - 24   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp
  76. + 0 - 27   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp
  77. + 0 - 27   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp
  78. + 0 - 46   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp
  79. + 0 - 98   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp
  80. + 0 - 94   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp
  81. + 0 - 51   ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/zkfc.pp
  82. + 0 - 25   ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb
  83. + 0 - 3    ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb
  84. + 0 - 122  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
  85. + 0 - 45   ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb
  86. + 0 - 45   ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb
  87. + 0 - 17   ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hdfs.conf.erb
  88. + 0 - 91   ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check-v2.erb
  89. + 0 - 118  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb
  90. + 0 - 3    ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/include_hosts_list.erb
  91. + 0 - 227  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb
  92. + 0 - 3    ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb
  93. + 0 - 20   ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb
  94. + 0 - 26   ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh
  95. + 0 - 32   ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmokeVerify.sh
  96. + 0 - 51   ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
  97. + 0 - 113  ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp
  98. + 0 - 155  ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp
  99. + 0 - 24   ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master-conn.pp
  100. + 0 - 66  ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master.pp

+ 0 - 7
LICENSE.txt

@@ -1,4 +1,3 @@
-
                                  Apache License
                            Version 2.0, January 2004
                         http://www.apache.org/licenses/
@@ -209,16 +208,10 @@ notices and license terms. Your use of the source code for the these
 subcomponents is subject to the terms and conditions of the following
 licenses. 
 
-For the stdlib in puppet modules 
-
-Copyright (C) 2011 Puppet Labs Inc
-
 and some parts:
 
 Copyright (C) 2011 Krzysztof Wilczynski
 
-Puppet Labs can be contacted at: info@puppetlabs.com
-
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

+ 0 - 6
ambari-agent/conf/unix/ambari-agent.ini

@@ -27,12 +27,6 @@ ping_port=8670
 cache_dir=/var/lib/ambari-agent/cache
 tolerate_download_failures=true
 
-[puppet]
-puppetmodules=/var/lib/ambari-agent/puppet
-ruby_home=/usr/lib/ambari-agent/lib/ruby-1.8.7-p370
-puppet_home=/usr/lib/ambari-agent/lib/puppet-2.7.9
-facter_home=/usr/lib/ambari-agent/lib/facter-1.6.10
-
 [command]
 maxretries=2
 sleepBetweenRetries=1

+ 1 - 96
ambari-agent/pom.xml

@@ -37,13 +37,10 @@
     <package.log.dir>/var/log/ambari-agent</package.log.dir>
     <package.pid.dir>/var/run/ambari-agent</package.pid.dir>
     <skipTests>false</skipTests>
-    <facter.tar>http://downloads.puppetlabs.com/facter/facter-1.6.10.tar.gz</facter.tar>
-    <puppet.tar>http://downloads.puppetlabs.com/puppet/puppet-2.7.9.tar.gz</puppet.tar>
     <agent.install.dir>/usr/lib/python2.6/site-packages/ambari_agent</agent.install.dir>
     <resmgmt.install.dir>/usr/lib/python2.6/site-packages/resource_management</resmgmt.install.dir>
     <common_functions.install.dir>/usr/lib/ambari-agent/lib/common_functions</common_functions.install.dir>
     <jinja.install.dir>/usr/lib/python2.6/site-packages/jinja2</jinja.install.dir>
-    <ruby.tar>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6/ruby-1.8.7-p370.tar.gz</ruby.tar>
     <lib.dir>/usr/lib/ambari-agent/lib</lib.dir>
     <python.ver>python &gt;= 2.6</python.ver>
     <deb.python.ver>python (&gt;= 2.6)</deb.python.ver>
@@ -53,20 +50,6 @@
     <target.cache.dir>${project.build.directory}/cache/</target.cache.dir>
     <resource.keeper.script>${ambari.server.module}/src/main/python/ambari_server/resourceFilesKeeper.py</resource.keeper.script>
   </properties>
-  <profiles>
-    <profile>
-      <id>suse11</id>
-      <properties>
-        <ruby.tar>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11/ruby-1.8.7-p370.tar.gz</ruby.tar>
-      </properties>
-    </profile>
-    <profile>
-      <id>centos5</id>
-      <properties>
-        <ruby.tar>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5/ruby-1.8.7-p370.tar.gz</ruby.tar>
-      </properties>
-    </profile>
-  </profiles>
   <build>
     <plugins>
       <plugin>
@@ -131,7 +114,7 @@
                 <argument>unitTests.py</argument>
               </arguments>
               <environmentVariables>
-                <PYTHONPATH>${project.basedir}/../ambari-common/src/main/python/jinja2:${project.basedir}/../ambari-common/src/main/python/common_functions:${project.basedir}/../ambari-common/src/test/python:${project.basedir}/../ambari-common/src/main/python:${project.basedir}/src/main/python/ambari_agent:${project.basedir}/src/main/python/resource_management:${project.basedir}/src/test/python/ambari_agent:${project.basedir}/src/test/python/resource_management:${project.basedir}/src/main/python:${project.basedir}/../ambari-agent/src/main/puppet/modules/hdp-hadoop/files:$PYTHONPATH</PYTHONPATH>
+                <PYTHONPATH>${project.basedir}/../ambari-common/src/main/python/jinja2:${project.basedir}/../ambari-common/src/main/python/common_functions:${project.basedir}/../ambari-common/src/test/python:${project.basedir}/../ambari-common/src/main/python:${project.basedir}/src/main/python/ambari_agent:${project.basedir}/src/main/python/resource_management:${project.basedir}/src/test/python/ambari_agent:${project.basedir}/src/test/python/resource_management:${project.basedir}/src/main/python:${project.basedir}/../ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files:${project.basedir}/../ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files:$PYTHONPATH</PYTHONPATH>
               </environmentVariables>
               <skip>${skipTests}</skip>
             </configuration>
@@ -255,17 +238,6 @@
                 </source>
               </sources>
             </mapping>
-            <mapping>
-              <directory>${lib.dir}</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>${project.build.directory}/lib</location>
-                </source>
-              </sources>
-            </mapping>
             <mapping>
               <directory>${jinja.install.dir}</directory>
               <sources>
@@ -288,17 +260,6 @@
                 </source>
               </sources>
             </mapping>
-            <mapping>
-              <directory>/var/lib/${project.artifactId}/puppet</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>src/main/puppet</location>
-                </source>
-              </sources>
-            </mapping>
             <mapping>
               <directory>/etc/ambari-agent/conf</directory>
               <filemode>755</filemode>
@@ -438,17 +399,6 @@
                 <prefix>${resmgmt.install.dir}</prefix>
               </mapper>
             </data>
-            <data>
-              <src>${project.build.directory}/lib</src>
-              <type>directory</type>
-              <mapper>
-                <type>perm</type>
-                <prefix>${lib.dir}</prefix>
-                <user>root</user>
-                <group>root</group>
-                <filemode>755</filemode>
-              </mapper>
-            </data>
             <data>
               <src>${project.basedir}/../ambari-common/src/main/python/jinja2/jinja2</src>
               <excludes>${project.basedir}/../ambari-common/src/main/python/jinja2/jinja2/testsuite</excludes>
@@ -469,17 +419,6 @@
                 <filemode>755</filemode>
               </mapper>
             </data>
-            <data>
-              <src>src/main/puppet</src>
-              <type>directory</type>
-              <mapper>
-                <type>perm</type>
-                <prefix>/var/lib/${project.artifactId}/puppet</prefix>
-                <user>root</user>
-                <group>root</group>
-                <filemode>755</filemode>
-              </mapper>
-            </data>
             <data>
               <src>conf/unix/ambari-agent.ini</src>
               <type>file</type>
@@ -599,39 +538,6 @@
           </dataSet>
         </configuration>
       </plugin>
-      <plugin>
-        <groupId>com.github.goldin</groupId>
-        <artifactId>copy-maven-plugin</artifactId>
-        <version>0.2.5</version>
-        <executions>
-          <execution>
-            <id>create-archive</id>
-            <phase>package</phase>
-            <goals>
-              <goal>copy</goal>
-            </goals>
-            <configuration>
-              <resources>
-                <resource>
-                  <targetPath>${project.build.directory}/lib</targetPath>
-                  <file>${ruby.tar}</file>
-                  <unpack>true</unpack>
-                </resource>
-                <resource>
-                  <targetPath>${project.build.directory}/lib</targetPath>
-                  <file>${facter.tar}</file>
-                  <unpack>true</unpack>
-                </resource>
-                <resource>
-                  <targetPath>${project.build.directory}/lib</targetPath>
-                  <file>${puppet.tar}</file>
-                  <unpack>true</unpack>
-                </resource>
-              </resources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
       <plugin>
         <artifactId>maven-resources-plugin</artifactId>
         <version>2.6</version>
@@ -694,7 +600,6 @@
             <exclude>src/test/python/ambari_agent/dummy_files/*</exclude>
             <exclude>src/test/python/ambari_agent/dummy*.txt</exclude>
             <exclude>src/main/python/ambari_agent/imports.txt</exclude>
-            <exclude>src/main/puppet/modules/stdlib/**</exclude>
             <exclude>**/*.erb</exclude>
             <exclude>**/*.json</exclude>
             <exclude>**/*.pydevproject</exclude>

+ 0 - 3
ambari-agent/src/main/package/deb/control/postinst

@@ -21,9 +21,6 @@ if [ "$1" == "configure" ]; then  # Action is install
   update-rc.d ambari-agent defaults
 fi
 
-
-chmod 755 /usr/lib/ambari-agent/lib/facter-1.6.10/bin/facter /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/filebucket /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/pi /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/puppet /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/puppetdoc /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/ralsh /usr/lib/ambari-agent/lib/ruby-1.8.7-p370/bin/*
-
 BAK=/etc/ambari-agent/conf/ambari-agent.ini.old
 ORIG=/etc/ambari-agent/conf/ambari-agent.ini
 

+ 0 - 1
ambari-agent/src/main/package/rpm/postinstall.sh

@@ -28,7 +28,6 @@ if [ "$1" -eq 2 ]; then # Action is upgrade
   fi
 fi
 
-chmod 755 /usr/lib/ambari-agent/lib/facter-1.6.10/bin/facter /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/filebucket /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/pi /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/puppet /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/puppetdoc /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/ralsh /usr/lib/ambari-agent/lib/ruby-1.8.7-p370/bin/*
 
 BAK=/etc/ambari-agent/conf/ambari-agent.ini.old
 ORIG=/etc/ambari-agent/conf/ambari-agent.ini

+ 0 - 48
ambari-agent/src/main/puppet/manifestloader/site.pp

@@ -1,48 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class manifestloader () {
-    file { '/etc/puppet/agent/modules.tgz':
-      ensure => present,
-      source => "puppet:///modules/catalog/modules.tgz",  
-      mode => '0755',
-    }
-
-    exec { 'untar_modules':
-      command => "rm -rf /etc/puppet/agent/modules ; tar zxf /etc/puppet/agent/modules.tgz -C /etc/puppet/agent/ --strip-components 3",
-      path    => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-    } 
-
-    exec { 'puppet_apply':
-      command   => "sh /etc/puppet/agent/modules/puppetApply.sh",
-      timeout   => 1800,
-      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-      logoutput => "true"
-    }
-
-    File['/etc/puppet/agent/modules.tgz'] -> Exec['untar_modules'] -> Exec['puppet_apply']
-}
-
-node default {
- stage{1 :}
- class {'manifestloader': stage => 1}
-}
-

+ 0 - 68
ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp

@@ -1,68 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-#
-# Generates xml configs from the given key-value hash maps
-#
-# Config file format:
-#
-# <configuration>
-#   <property>
-#     <name>name1</name><value>value1</value>
-#   </property>
-#     ..
-#   <property>
-#     <name>nameN</name><value>valueN</value>
-#   </property>
-# </configuration>
-#
-# Params:
-# - configname - name of the config file (class title by default)
-# - modulespath - modules path ('/etc/puppet/modules' by default)
-# - module - module name
-# - properties - set of the key-value pairs (puppet hash) which corresponds to property name - property value pairs of config file
-#
-# Note: Set correct $modulespath in the configgenerator (or pass it as parameter)
-#
-
-define configgenerator::configfile ($modulespath='/etc/puppet/modules', $filename, $module, $configuration, $owner = "root", $group = "root", $mode = undef) {
-  $configcontent = inline_template('<!--<%=Time.now.asctime %>-->
-  <configuration>
-  <% configuration.each do |key,value| -%>
-  <property>
-    <name><%=key %></name>
-    <value><%=value %></value>
-  </property>
-  <% end -%>
-</configuration>')
- 
-
-debug("Generating config: ${modulespath}/${filename}")
-
-file {"${modulespath}/${filename}":
-  ensure  => present,
-  content => $configcontent,
-  path => "${modulespath}/${filename}",
-  owner => $owner,
-  group => $group,
-  mode => $mode
-}
-} 

+ 0 - 23
ambari-agent/src/main/puppet/modules/configgenerator/manifests/init.pp

@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class configgenerator() {
-}

+ 0 - 21
ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/dashboard/service_check.pp

@@ -1,21 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-dashboard::dashboard::service_check(){}

+ 0 - 76
ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/init.pp

@@ -1,76 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-dashboard(
-  $service_state = $hdp::params::cluster_client_state,
-  $opts = {}
-) inherits hdp-dashboard::params
-{
-   if ($service_state == 'no_op') {
-   } elsif ($service_state == 'uninstalled') {
-    hdp::package { 'dashboard' :
-      ensure => 'uninstalled',
-      java_needed => 'false',
-      size   => 64
-    }
-    hdp::directory_recursive_create { $conf_dir :
-      service_state => $service_state,
-      force => true
-    }
-
-    Hdp::Package['dashboard'] -> Hdp::Directory_recursive_create[$conf_dir]
-
-   } elsif ($service_state in ['running','installed_and_configured','stopped']) {
-      hdp::package { 'dashboard' :
-        java_needed => 'false',
-        size => 64
-       }
-     $conf_dir =  $hdp-dashboard::params::conf_dir
-  
-     hdp::directory_recursive_create { $conf_dir :
-       service_state => $service_state,
-       force => true
-     }
- 
-     hdp-dashboard::configfile { 'cluster_configuration.json' : }
-     Hdp-Dashboard::Configfile<||>{dashboard_host => $hdp::params::host_address}
-  
-     #top level does not need anchors
-     Hdp::Package['dashboard'] -> Hdp::Directory_recursive_create[$conf_dir] -> Hdp-Dashboard::Configfile<||> 
-    } else {
-     hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-   }
-}
-
-###config file helper
-define hdp-dashboard::configfile(
-  $dashboard_host = undef
-)
-{
-  
-  hdp::configfile { "${hdp-dashboard::params::conf_dir}/${name}":
-    component      => 'dashboard',
-    owner          => root,
-    group          => root,
-    dashboard_host => $dashboard_host
-  }
-}
-
-

+ 0 - 28
ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/params.pp

@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-dashboard::params()
-{
-  
-  $conf_dir = "/usr/share/hdp/dashboard/dataServices/conf/" #cannot change since hard coded in rpm
-
-  $hdp_cluster_name = hdp_default("hadoop/cluster_configuration/hdp_cluster_name")
-  $scheduler_name = hdp_default("hadoop/cluster_configuration/scheduler_name")
-}

+ 0 - 97
ambari-agent/src/main/puppet/modules/hdp-dashboard/templates/cluster_configuration.json.erb

@@ -1,97 +0,0 @@
-{
-  "config_version": 1,
-  "stack_version": "1.0.2",
-  "overall": {
-    "cluster_name": "<%=scope.function_hdp_template_var("hdp_cluster_name")%>",
-    "dashboard_host": "<%=scope.function_hdp_host("public_dashboard_host")%>",
-    "dashboard_port": 80,
-    "dataservices_host": "<%=scope.function_hdp_host("public_dashboard_host")%>",
-    "dataservices_port": 80,
-    "ganglia" : {
-      "web_host": "<%=scope.function_hdp_host("public_ganglia_server_host")%>",
-      "web_port": 80,
-      "web_root": "/ganglia/?t=yes",
-      "grid_name": "HDP_GRID"
-    },
-    "nagios": {
-      "nagiosserver_host": "<%=scope.function_hdp_host("public_nagios_server_host")%>",
-      "nagiosserver_port": 80,
-      "web_root": "/nagios"
-    },
-    "jmx": {
-      "timeout": 3
-    },
-    "services": {
-	  "HDFS" : [
-        {
-          "installed": true,
-          "name": "HDFS",
-          "namenode_host": "<%=scope.function_hdp_host("public_namenode_host")%>",
-          "namenode_port": 50070,
-          "snamenode_host": "<%=scope.function_hdp_host("public_snamenode_host")%>",
-          "snamenode_port": 50090,
-          "total_datanodes": "<%=h=scope.function_hdp_template_var("slave_hosts");h.kind_of?(Array) ? h.size : ''%>",
-          "ganglia_clusters": {
-            "slaves": "HDPSlaves",
-            "namenode": "HDPNameNode"
-          }
-        }
-      ],
-      "MAPREDUCE" : [
-        {
-          "installed": true,
-          "name": "MAPREDUCE",
-          "jobtracker_host": "<%=scope.function_hdp_host("public_jtnode_host")%>",
-          "jobtracker_port": 50030,
-          "total_tasktrackers": "<%=h=scope.function_hdp_template_var("slave_hosts");h.kind_of?(Array) ? h.size : ''%>",
-          "jobhistory_host": "<%=scope.function_hdp_host("public_jtnode_host")%>",
-          "jobhistory_port": 51111,
-          "ganglia_clusters": {
-            "slaves": "HDPSlaves",
-            "jobtracker": "HDPJobTracker"
-          },
-          "scheduler_type": "<%=scope.function_hdp_template_var("scheduler_name")%>"
-        }
-      ],
-      "HBASE" : [
-        {
-          "installed": <%=not scope.function_hdp_no_hosts("public_hbase_master_hosts")%>,
-          "name": "HBASE",
-          "hbasemasters_hosts": "<%=scope.function_hdp_host("public_hbase_master_hosts")%>",
-          "hbasemasters_port": 60010,
-          "total_regionservers": "<%=h=scope.function_hdp_template_var("slave_hosts");h.kind_of?(Array) ? h.size : ''%>",
-          "ganglia_clusters": {
-            "slaves": "HDPSlaves",
-            "hbasemasters": "HDPHBaseMaster"
-          }
-        }
-      ],
-      "ZOOKEEPER" : [
-        {
-          "installed": <%=not scope.function_hdp_no_hosts("public_zookeeper_hosts")%>,
-          "name": "ZOOKEEPER"
-        }
-      ],
-      "HIVE" : [
-        {
-          "installed": <%=not scope.function_hdp_no_hosts("public_hive_server_host")%>,
-          "name": "HIVE"
-        }
-      ],
-      "TEMPLETON" : [
-        {
-          "installed": <%=not scope.function_hdp_no_hosts("public_webhcat_server_host")%>,
-          "name": "TEMPLETON"
-        }
-      ],
-      "OOZIE" : [
-        {
-          "installed": <%=not scope.function_hdp_no_hosts("public_oozie_server")%>,
-          "name": "OOZIE",
-          "oozie_host": "<%=scope.function_hdp_host("public_oozie_server")%>",
-          "oozie_port": 11000
-        }
-      ]
-    }
-  }
-}

+ 0 - 23
ambari-agent/src/main/puppet/modules/hdp-flume/files/flumeSmoke.sh

@@ -1,23 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-echo "Flume Smoke Test: Passed" 

+ 0 - 25
ambari-agent/src/main/puppet/modules/hdp-flume/manifests/client.pp

@@ -1,25 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-flume::client(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp::params
-{
-}

+ 0 - 27
ambari-agent/src/main/puppet/modules/hdp-flume/manifests/init.pp

@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-flume(
-  $type = server,
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-flume::params
-{
-}

+ 0 - 24
ambari-agent/src/main/puppet/modules/hdp-flume/manifests/params.pp

@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-flume::params() inherits hdp::params
-{
-  $flume_log_dir = hdp_default("flume_log_dir","/var/log/flume")
-}

+ 0 - 28
ambari-agent/src/main/puppet/modules/hdp-flume/manifests/service.pp

@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-flume::service(
-  $ensure = $hdp::params::cluster_service_state
-)
-{
-}
-
-
-

+ 0 - 24
ambari-agent/src/main/puppet/modules/hdp-flume/templates/flume-env.sh.erb

@@ -1,24 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Enviroment variables can be set here.
-#
-# #JAVA_HOME=/usr/lib/jvm/java-6-sun
-#
-# # Give Flume more memory and pre-allocate, enable remote monitoring via JMX
-JAVA_OPTS="-Xms100m -Xmx200m -Dcom.sun.management.jmxremote -Dflume.monitoring.type=ganglia -Dflume.monitoring.hosts=<%=scope.function_hdp_template_var("::hdp-flume::params::ganglia_sink")%>"
-#
-# # Note that the Flume conf directory is always included in the classpath.
-# #FLUME_CLASSPATH=""

+ 0 - 59
ambari-agent/src/main/puppet/modules/hdp-flume/templates/log4j.properties.erb

@@ -1,59 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Define some default values that can be overridden by system properties.
-#
-# For testing, it may also be convenient to specify
-# -Dflume.root.logger=DEBUG,console when launching flume.
-
-#flume.root.logger=DEBUG,console
-flume.root.logger=INFO,LOGFILE
-flume.log.dir=<%=scope.function_hdp_template_var("::hdp-flume::params::flume_log_dir")%>
-flume.log.file=flume.log
-
-log4j.logger.org.apache.flume.lifecycle = INFO
-log4j.logger.org.jboss = WARN
-log4j.logger.org.mortbay = INFO
-log4j.logger.org.apache.avro.ipc.NettyTransceiver = WARN
-log4j.logger.org.apache.hadoop = INFO
-
-# Define the root logger to the system property "flume.root.logger".
-log4j.rootLogger=${flume.root.logger}
-
-#
-# Rolling file appender
-# Default log rotation configuration
-#
-
-log4j.appender.LOGFILE=org.apache.log4j.RollingFileAppender
-log4j.appender.LOGFILE.MaxFileSize=100MB
-log4j.appender.LOGFILE.MaxBackupIndex=10
-log4j.appender.LOGFILE.File=${flume.log.dir}/${flume.log.file}
-log4j.appender.LOGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.LOGFILE.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d (%t) [%p - %l] %m%n

+ 0 - 37
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmetad.sh

@@ -1,37 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# Before checking gmetad, check rrdcached.
-./checkRrdcached.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-if [ -n "${gmetadRunningPid}" ]
-then
-  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
-else
-  echo "Failed to find running ${GMETAD_BIN}";
-  exit 1;
-fi

+ 0 - 62
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmond.sh

@@ -1,62 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function checkGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
-    # Skip over (purported) Clusters that don't have their core conf file present.
-    if [ -e "${gmondCoreConfFileName}" ]
-    then 
-      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-      if [ -n "${gmondRunningPid}" ]
-      then
-        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
-      else
-        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
-        exit 1;
-      fi
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so check
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        checkGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just check the one ${gmondClusterName} that was asked for.
-    checkGmondForCluster ${gmondClusterName};
-fi

+ 0 - 34
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkRrdcached.sh

@@ -1,34 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-if [ -n "${rrdcachedRunningPid}" ]
-then
-  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
-else
-  echo "Failed to find running ${RRDCACHED_BIN}";
-  exit 1;
-fi

+ 0 - 73
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetad.init

@@ -1,73 +0,0 @@
-#!/bin/sh
-# chkconfig: 2345 70 40
-# description: hdp-gmetad startup script
-# processname: hdp-gmetad
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in-sync with the definition of 
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
-HDP_GANLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
-HDP_GANLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmetad..."
-      echo "============================="
-      [ -f ${HDP_GANLIA_GMETAD_STARTER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmetad..."
-      echo "=================================="
-      [ -f ${HDP_GANLIA_GMETAD_STOPPER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmetad..."
-      echo "======================================="
-      [ -f ${HDP_GANLIA_GMETAD_CHECKER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL

+ 0 - 204
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetadLib.sh

@@ -1,204 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMETAD_BIN=/usr/sbin/gmetad;
-GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
-GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
-
-function getGmetadLoggedPid()
-{
-    if [ -e "${GMETAD_PID_FILE}" ]
-    then
-        echo `cat ${GMETAD_PID_FILE}`;
-    fi
-}
-
-function getGmetadRunningPid()
-{
-    gmetadLoggedPid=`getGmetadLoggedPid`;
-
-    if [ -n "${gmetadLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmetadConf()
-{
-    now=`date`;
-
-    cat <<END_OF_GMETAD_CONF_1
-#################### Generated by ${0} on ${now} ####################
-#
-#-------------------------------------------------------------------------------
-# Setting the debug_level to 1 will keep daemon in the forground and
-# show only error messages. Setting this value higher than 1 will make 
-# gmetad output debugging information and stay in the foreground.
-# default: 0
-# debug_level 10
-#
-#-------------------------------------------------------------------------------
-# What to monitor. The most important section of this file. 
-#
-# The data_source tag specifies either a cluster or a grid to
-# monitor. If we detect the source is a cluster, we will maintain a complete
-# set of RRD databases for it, which can be used to create historical 
-# graphs of the metrics. If the source is a grid (it comes from another gmetad),
-# we will only maintain summary RRDs for it.
-#
-# Format: 
-# data_source "my cluster" [polling interval] address1:port addreses2:port ...
-# 
-# The keyword 'data_source' must immediately be followed by a unique
-# string which identifies the source, then an optional polling interval in 
-# seconds. The source will be polled at this interval on average. 
-# If the polling interval is omitted, 15sec is asssumed. 
-#
-# If you choose to set the polling interval to something other than the default,
-# note that the web frontend determines a host as down if its TN value is less
-# than 4 * TMAX (20sec by default).  Therefore, if you set the polling interval
-# to something around or greater than 80sec, this will cause the frontend to
-# incorrectly display hosts as down even though they are not.
-#
-# A list of machines which service the data source follows, in the 
-# format ip:port, or name:port. If a port is not specified then 8649
-# (the default gmond port) is assumed.
-# default: There is no default value
-#
-# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
-# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
-# data_source "another source" 1.3.4.7:8655  1.3.4.8
-END_OF_GMETAD_CONF_1
-
-    # Get info about all the configured Ganglia clusters.
-    getGangliaClusterInfo | while read gangliaClusterInfoLine
-    do
-        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
-        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
-        # ...and generate a corresponding data_source line for gmetad.conf. 
-        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
-    done
-
-    cat <<END_OF_GMETAD_CONF_2
-#
-# Round-Robin Archives
-# You can specify custom Round-Robin archives here (defaults are listed below)
-#
-# Old Default RRA: Keep 1 hour of metrics at 15 second resolution. 1 day at 6 minute
-# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-#      "RRA:AVERAGE:0.5:5760:374"
-# New Default RRA
-# Keep 5856 data points at 15 second resolution assuming 15 second (default) polling. That's 1 day
-# Two weeks of data points at 1 minute resolution (average)
-#RRAs "RRA:AVERAGE:0.5:1:5856" "RRA:AVERAGE:0.5:4:20160" "RRA:AVERAGE:0.5:40:52704"
-# Retaining existing resolution
-RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-     "RRA:AVERAGE:0.5:5760:374"
-#
-#-------------------------------------------------------------------------------
-# Scalability mode. If on, we summarize over downstream grids, and respect
-# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
-# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
-# we are the "authority" on data source feeds. This approach does not scale to
-# large groups of clusters, but is provided for backwards compatibility.
-# default: on
-# scalable off
-#
-#-------------------------------------------------------------------------------
-# The name of this Grid. All the data sources above will be wrapped in a GRID
-# tag with this name.
-# default: unspecified
-gridname "HDP_GRID"
-#
-#-------------------------------------------------------------------------------
-# The authority URL for this grid. Used by other gmetads to locate graphs
-# for our data sources. Generally points to a ganglia/
-# website on this machine.
-# default: "http://hostname/ganglia/",
-#   where hostname is the name of this machine, as defined by gethostname().
-# authority "http://mycluster.org/newprefix/"
-#
-#-------------------------------------------------------------------------------
-# List of machines this gmetad will share XML with. Localhost
-# is always trusted. 
-# default: There is no default value
-# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
-#
-#-------------------------------------------------------------------------------
-# If you want any host which connects to the gmetad XML to receive
-# data, then set this value to "on"
-# default: off
-# all_trusted on
-#
-#-------------------------------------------------------------------------------
-# If you don't want gmetad to setuid then set this to off
-# default: on
-# setuid off
-#
-#-------------------------------------------------------------------------------
-# User gmetad will setuid to (defaults to "nobody")
-# default: "nobody"
-setuid_username "${GMETAD_USER}"
-#
-#-------------------------------------------------------------------------------
-# Umask to apply to created rrd files and grid directory structure
-# default: 0 (files are public)
-# umask 022
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer requests for XML
-# default: 8651
-# xml_port 8651
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer queries for XML. This facility allows
-# simple subtree and summation views of the XML tree.
-# default: 8652
-# interactive_port 8652
-#
-#-------------------------------------------------------------------------------
-# The number of threads answering XML requests
-# default: 4
-# server_threads 10
-#
-#-------------------------------------------------------------------------------
-# Where gmetad stores its round-robin databases
-# default: "/var/lib/ganglia/rrds"
-# rrd_rootdir "/some/other/place"
-#
-#-------------------------------------------------------------------------------
-# In earlier versions of gmetad, hostnames were handled in a case
-# sensitive manner.
-# If your hostname directories have been renamed to lower case,
-# set this option to 0 to disable backward compatibility.
-# From version 3.2, backwards compatibility will be disabled by default.
-# default: 1   (for gmetad < 3.2)
-# default: 0   (for gmetad >= 3.2)
-case_sensitive_hostnames 1
-END_OF_GMETAD_CONF_2
-}

+ 0 - 73
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmond.init

@@ -1,73 +0,0 @@
-#!/bin/sh
-# chkconfig: 2345 70 40
-# description: hdp-gmond startup script
-# processname: hdp-gmond
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in-sync with the definition of 
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANGLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
-HDP_GANGLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
-HDP_GANGLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmond..."
-      echo "============================="
-      [ -f ${HDP_GANGLIA_GMOND_STARTER} ] || exit 1
-      eval "${HDP_GANGLIA_GMOND_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmond..."
-      echo "=================================="
-      [ -f ${HDP_GANGLIA_GMOND_STOPPER} ] || exit 1
-      eval "${HDP_GANGLIA_GMOND_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmond..."
-      echo "======================================="
-      [ -f ${HDP_GANGLIA_GMOND_CHECKER} ] || exit 1
-      eval "${HDP_GANGLIA_GMOND_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL

+ 0 - 556
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmondLib.sh

@@ -1,556 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMOND_BIN=/usr/sbin/gmond;
-GMOND_CORE_CONF_FILE=gmond.core.conf;
-GMOND_MASTER_CONF_FILE=gmond.master.conf;
-GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
-GMOND_PID_FILE=gmond.pid;
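-# Unlike their gmetad counterparts, these are bare file names: the helpers
-# below resolve them under per-cluster subdirectories of ${GANGLIA_CONF_DIR}
-# and ${GANGLIA_RUNTIME_DIR} when a cluster name is supplied.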
-
-# Functions.
-function getGmondCoreConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
-    fi
-}
-
-function getGmondMasterConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    fi
-}
-
-function getGmondSlaveConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    fi
-}
-
-function getGmondPidFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
-    else
-        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
-    fi
-}
-
-function removeGmondPidFileName()
-{
-    clusterName=${1};
-    gmondPidFileName=`getGmondPidFileName ${clusterName}`;
-    if [ -e "${gmondPidFileName}" ]; 
-     then
-      rm -rf ${gmondPidFileName};          
-    fi 
-}
-
-
-function getGmondLoggedPid()
-{
-    gmondPidFile=`getGmondPidFileName ${1}`;
-
-    if [ -e "${gmondPidFile}" ]
-    then
-        echo `cat ${gmondPidFile}`;
-    fi
-}
-
-function getGmondRunningPid()
-{
-    gmondLoggedPid=`getGmondLoggedPid ${1}`;
-
-    if [ -n "${gmondLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmondCoreConf()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
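-        # The here-string splits the "clusterName masterIP port" triple
-        # returned by getGangliaClusterInfo into the three variables.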
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_CORE_CONF
-#################### Generated by ${0} on ${now} ####################
-#
-/* This configuration is as close to 2.5.x default behavior as possible
-   The values closely match ./gmond/metric.h definitions in 2.5.x */
-globals {
-  daemonize = yes
-  setuid = yes
-  user = ${GMOND_USER}
-  debug_level = 0
-  max_udp_msg_len = 1472
-  mute = no
-  deaf = no 
-  allow_extra_data = yes
-  host_dmax = 0 /*secs */
-  host_tmax = 20 /*secs */
-  cleanup_threshold = 300 /*secs */
-  gexec = no
-  send_metadata_interval = 30 /*secs */
-}
-
-/*
- * The cluster attributes specified will be used as part of the <CLUSTER>
- * tag that will wrap all hosts collected by this instance.
- */
-cluster {
-  name = "${gmondClusterName}"
-  owner = "unspecified"
-  latlong = "unspecified"
-  url = "unspecified"
-}
-
-/* The host section describes attributes of the host, like the location */
-host {
-  location = "unspecified"
-}
-
-/* You can specify as many tcp_accept_channels as you like to share
- * an XML description of the state of the cluster.
- *
- * At the very least, every gmond must expose its XML state to 
- * queriers from localhost.
- */
-tcp_accept_channel {
-  bind = localhost
-  port = ${gmondPort}
-}
-
-/* Each metrics module that is referenced by gmond must be specified and
-   loaded. If the module has been statically linked with gmond, it does
-   not require a load path. However all dynamically loadable modules must
-   include a load path. */
-modules {
-  module {
-    name = "core_metrics"
-  }
-  module {
-    name = "cpu_module"
-    path = "modcpu.so"
-  }
-  module {
-    name = "disk_module"
-    path = "moddisk.so"
-  }
-  module {
-    name = "load_module"
-    path = "modload.so"
-  }
-  module {
-    name = "mem_module"
-    path = "modmem.so"
-  }
-  module {
-    name = "net_module"
-    path = "modnet.so"
-  }
-  module {
-    name = "proc_module"
-    path = "modproc.so"
-  }
-  module {
-    name = "sys_module"
-    path = "modsys.so"
-  }
-}
-
-/* The old internal 2.5.x metric array has been replaced by the following
-   collection_group directives.  What follows is the default behavior for
-   collecting and sending metrics that is as close to 2.5.x behavior as
-   possible. */
-
-/* This collection group will cause a heartbeat (or beacon) to be sent every
-   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
-   the age of the running gmond. */
-collection_group {
-  collect_once = yes
-  time_threshold = 20
-  metric {
-    name = "heartbeat"
-  }
-}
-
-/* This collection group will send general info about this host's total memory
-   every 180 secs.
-   This information doesn't change between reboots and is only collected
-   once. It is needed for the heatmap display. */
- collection_group {
-   collect_once = yes
-   time_threshold = 180
-   metric {
-    name = "mem_total"
-    title = "Memory Total"
-   }
- }
-
-/* This collection group will send general info about this host every
-   1200 secs.
-   This information doesn't change between reboots and is only collected
-   once. */
-collection_group {
-  collect_once = yes
-  time_threshold = 1200
-  metric {
-    name = "cpu_num"
-    title = "CPU Count"
-  }
-  metric {
-    name = "cpu_speed"
-    title = "CPU Speed"
-  }
-  /* Should this be here? Swap can be added/removed between reboots. */
-  metric {
-    name = "swap_total"
-    title = "Swap Space Total"
-  }
-  metric {
-    name = "boottime"
-    title = "Last Boot Time"
-  }
-  metric {
-    name = "machine_type"
-    title = "Machine Type"
-  }
-  metric {
-    name = "os_name"
-    title = "Operating System"
-  }
-  metric {
-    name = "os_release"
-    title = "Operating System Release"
-  }
-  metric {
-    name = "location"
-    title = "Location"
-  }
-}
-
-/* This collection group will send the status of gexecd for this host
-   every 300 secs.*/
-/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
-collection_group {
-  collect_once = yes
-  time_threshold = 300
-  metric {
-    name = "gexec"
-    title = "Gexec Status"
-  }
-}
-
-/* This collection group will collect the CPU status info every 20 secs.
-   The time threshold is set to 90 seconds.  In honesty, this
-   time_threshold could be set significantly higher to reduce
-   unnecessary network chatter. */
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* CPU status */
-  metric {
-    name = "cpu_user"
-    value_threshold = "1.0"
-    title = "CPU User"
-  }
-  metric {
-    name = "cpu_system"
-    value_threshold = "1.0"
-    title = "CPU System"
-  }
-  metric {
-    name = "cpu_idle"
-    value_threshold = "5.0"
-    title = "CPU Idle"
-  }
-  metric {
-    name = "cpu_nice"
-    value_threshold = "1.0"
-    title = "CPU Nice"
-  }
-  metric {
-    name = "cpu_aidle"
-    value_threshold = "5.0"
-    title = "CPU aidle"
-  }
-  metric {
-    name = "cpu_wio"
-    value_threshold = "1.0"
-    title = "CPU wio"
-  }
-  /* The next two metrics are optional if you want more detail...
-     ... since they are accounted for in cpu_system.
-  metric {
-    name = "cpu_intr"
-    value_threshold = "1.0"
-    title = "CPU intr"
-  }
-  metric {
-    name = "cpu_sintr"
-    value_threshold = "1.0"
-    title = "CPU sintr"
-  }
-  */
-}
-
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* Load Averages */
-  metric {
-    name = "load_one"
-    value_threshold = "1.0"
-    title = "One Minute Load Average"
-  }
-  metric {
-    name = "load_five"
-    value_threshold = "1.0"
-    title = "Five Minute Load Average"
-  }
-  metric {
-    name = "load_fifteen"
-    value_threshold = "1.0"
-    title = "Fifteen Minute Load Average"
-  }
-}
-
-/* This group collects the number of running and total processes */
-collection_group {
-  collect_every = 80
-  time_threshold = 950
-  metric {
-    name = "proc_run"
-    value_threshold = "1.0"
-    title = "Total Running Processes"
-  }
-  metric {
-    name = "proc_total"
-    value_threshold = "1.0"
-    title = "Total Processes"
-  }
-}
-
-/* This collection group grabs the volatile memory metrics every 40 secs and
-   sends them at least every 180 secs.  This time_threshold can be increased
-   significantly to reduce unneeded network traffic. */
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "mem_free"
-    value_threshold = "1024.0"
-    title = "Free Memory"
-  }
-  metric {
-    name = "mem_shared"
-    value_threshold = "1024.0"
-    title = "Shared Memory"
-  }
-  metric {
-    name = "mem_buffers"
-    value_threshold = "1024.0"
-    title = "Memory Buffers"
-  }
-  metric {
-    name = "mem_cached"
-    value_threshold = "1024.0"
-    title = "Cached Memory"
-  }
-  metric {
-    name = "swap_free"
-    value_threshold = "1024.0"
-    title = "Free Swap Space"
-  }
-}
-
-collection_group {
-  collect_every = 40
-  time_threshold = 300
-  metric {
-    name = "bytes_out"
-    value_threshold = 4096
-    title = "Bytes Sent"
-  }
-  metric {
-    name = "bytes_in"
-    value_threshold = 4096
-    title = "Bytes Received"
-  }
-  metric {
-    name = "pkts_in"
-    value_threshold = 256
-    title = "Packets Received"
-  }
-  metric {
-    name = "pkts_out"
-    value_threshold = 256
-    title = "Packets Sent"
-  }
-}
-
-
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "disk_free"
-    value_threshold = 1.0
-    title = "Disk Space Available"
-  }
-  metric {
-    name = "part_max_used"
-    value_threshold = 1.0
-    title = "Maximum Disk Space Used"
-  }
-  metric {
-    name = "disk_total"
-    value_threshold = 1.0
-    title = "Total Disk Space"
-  }
-}
-
-udp_recv_channel {
-    port = 0
-}
-
-
-include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
-END_OF_GMOND_CORE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondMasterConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_MASTER_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Masters only receive; they never send. */
-udp_recv_channel {
-  bind = ${gmondMasterIP}
-  port = ${gmondPort}
-}
-
-/* The gmond cluster master must additionally provide an XML 
- * description of the cluster to the gmetad that will query it.
- */
-tcp_accept_channel {
-  bind = ${gmondMasterIP}
-  port = ${gmondPort}
-}
-END_OF_GMOND_MASTER_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondSlaveConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_SLAVE_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Slaves only send; they never receive. */
-udp_send_channel {
-  #bind_hostname = yes # Highly recommended, soon to be default.
-                       # This option tells gmond to use a source address
-                       # that resolves to the machine's hostname.  Without
-                       # this, the metrics may appear to come from any
-                       # interface and the DNS names associated with
-                       # those IPs will be used to create the RRDs.
-  host = ${gmondMasterIP}
-  port = ${gmondPort}
-  ttl = 1
-}
-END_OF_GMOND_SLAVE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}

+ 0 - 207
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrd.py

@@ -1,207 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import cgi
-import os
-import rrdtool
-import sys
-import time
-import re
-import urlparse
-
-# place this script in /var/www/cgi-bin of the Ganglia collector
-# requires 'yum install rrdtool-python' on the Ganglia collector
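-# Example request (host and time values are illustrative):
-#   /cgi-bin/rrd.py?c=HDPSlaves&h=host1.example.com&m=load_one&s=now-1h&e=now&cf=AVERAGE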
-
-
-def printMetric(clusterName, hostName, metricName, file, cf, start, end, resolution, pointInTime):
-  if clusterName.endswith("rrds"):
-    clusterName = ""
-
-  args = [file, cf]
-
-  if start is not None:
-    args.extend(["-s", start])
-
-  if end is not None:
-    args.extend(["-e", end])
-
-  if resolution is not None:
-    args.extend(["-r", resolution])
-
-  rrdMetric = rrdtool.fetch(args)
-  # ds_name
-  sys.stdout.write(rrdMetric[1][0])
-  sys.stdout.write("\n")
-
-  sys.stdout.write(clusterName)
-  sys.stdout.write("\n")
-  sys.stdout.write(hostName)
-  sys.stdout.write("\n")
-  sys.stdout.write(metricName)
-  sys.stdout.write("\n")
-
-  # write time
-  sys.stdout.write(str(rrdMetric[0][0]))
-  sys.stdout.write("\n")
-  # write step
-  sys.stdout.write(str(rrdMetric[0][2]))
-  sys.stdout.write("\n")
-
-  if not pointInTime:
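-    # Stream samples with simple run-length encoding: each value is printed
-    # once, and a following "[~r]<count>" line records how many consecutive
-    # rows held it; "[~n]" stands for a missing (None) sample.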
-    valueCount = 0
-    lastValue = None
-
-    for tuple in rrdMetric[2]:
-
-      thisValue = tuple[0]
-
-      if valueCount > 0 and thisValue == lastValue:
-        valueCount += 1
-      else:
-        if valueCount > 1:
-          sys.stdout.write("[~r]")
-          sys.stdout.write(str(valueCount))
-          sys.stdout.write("\n")
-
-        if thisValue is None:
-          sys.stdout.write("[~n]\n")
-        else:
-          sys.stdout.write(str(thisValue))
-          sys.stdout.write("\n")
-
-        valueCount = 1
-        lastValue = thisValue
-  else:
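-    # Point-in-time query: scan backwards from the newest row for the most
-    # recent non-null sample.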
-    value = None
-    idx   = -1
-    tuple = rrdMetric[2]
-    tupleLastIdx = len(tuple) * -1
-
-    while value is None and idx >= tupleLastIdx:
-      value = tuple[idx][0]
-      idx-=1
-
-    if value is not None:
-      sys.stdout.write(str(value))
-      sys.stdout.write("\n")
-
-  sys.stdout.write("[~EOM]\n")
-  return
-
-def stripList(l):
-  return([x.strip() for x in l])
-
-sys.stdout.write("Content-type: text/plain\n\n")
-
-# write start time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-requestMethod = os.environ['REQUEST_METHOD']
-
-if requestMethod == 'POST':
-  postData = sys.stdin.readline()
-  queryString = cgi.parse_qs(postData)
-  queryString = dict((k, v[0]) for k, v in queryString.items())
-elif requestMethod == 'GET':
-  queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']))
-
-if "m" in queryString:
-  metricParts = queryString["m"].split(",")
-else:
-  metricParts = [""]
-metricParts = stripList(metricParts)
-
-hostParts = []
-if "h" in queryString:
-  hostParts = queryString["h"].split(",")
-hostParts = stripList(hostParts)
-
-if "c" in queryString:
-  clusterParts = queryString["c"].split(",")
-else:
-  clusterParts = [""]
-clusterParts = stripList(clusterParts)
-
-if "p" in queryString:
-  rrdPath = queryString["p"]
-else:
-  rrdPath = "/var/lib/ganglia/rrds/"
-
-start = None
-if "s" in queryString:
-  start = queryString["s"]
-
-end = None
-if "e" in queryString:
-  end = queryString["e"]
-
-resolution = None
-if "r" in queryString:
-  resolution = queryString["r"]
-
-if "cf" in queryString:
-  cf = queryString["cf"]
-else:
-  cf = "AVERAGE"
-
-if "pt" in queryString:
-  pointInTime = True
-else:
-  pointInTime = False
-
-def _walk(*args, **kwargs):
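-  # Like os.walk, but also descends into directories that are symlinks,
-  # which os.walk skips by default.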
-
-  for root,dirs,files in os.walk(*args, **kwargs):
-    for dir in dirs:
-      qualified_dir = os.path.join(root,dir)
-      if os.path.islink(qualified_dir):
-        for x in os.walk(qualified_dir, **kwargs):
-          yield x
-    yield (root, dirs, files)
-
-
-for cluster in clusterParts:
-  for path, dirs, files in _walk(rrdPath + cluster):
-    pathParts = path.split("/")
-    # Process only paths which contain files. If no host parameter was passed, process all host folders plus the summary info;
-    # if a host parameter was passed, process only that host's folder.
-    if len(files) > 0 and (len(hostParts) == 0 or pathParts[-1] in hostParts):
-      for metric in metricParts:
-        file = metric + ".rrd"
-        fileFullPath = os.path.join(path, file)
-        if os.path.exists(fileFullPath):
-          #Exact name of metric
-          printMetric(pathParts[-2], pathParts[-1], file[:-4], os.path.join(path, file), cf, start, end, resolution, pointInTime)
-        else:
-          #Regex as metric name
-          metricRegex = metric + '\.rrd$'
-          p = re.compile(metricRegex)
-          matchedFiles = filter(p.match, files)
-          for matchedFile in matchedFiles:
-            printMetric(pathParts[-2], pathParts[-1], matchedFile[:-4], os.path.join(path, matchedFile), cf, start, end, resolution, pointInTime)
-
-
-sys.stdout.write("[~EOF]\n")
-# write end time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-sys.stdout.flush

+ 0 - 47
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrdcachedLib.sh

@@ -1,47 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-RRDCACHED_BIN=/usr/bin/rrdcached;
-RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
-RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
-RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
-
-function getRrdcachedLoggedPid()
-{
-    if [ -e "${RRDCACHED_PID_FILE}" ]
-    then
-        echo `cat ${RRDCACHED_PID_FILE}`;
-    fi
-}
-
-function getRrdcachedRunningPid()
-{
-    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
-
-    if [ -n "${rrdcachedLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}

+ 0 - 141
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/setupGanglia.sh

@@ -1,141 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh
-
-function usage()
-{
-  cat << END_USAGE
-Usage: ${0} [-c <gmondClusterName> [-m]] [-t] [-o <owner>] [-g <group>]
-
-Options:
-  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
-
-  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
-                          Cluster. Without this, we generate slave gmond configuration.
-
-  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
-                          gmond configuration that is generated without this).
-  -o <owner>              Owner
-  -g <group>              Group
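-
-Examples (cluster names are illustrative):
-  ${0} -c HDPSlaves          Generate slave gmond configuration for cluster HDPSlaves.
-  ${0} -c HDPNameNode -m     Generate master gmond configuration for cluster HDPNameNode.
-  ${0} -t                    Generate gmetad configuration.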
-END_USAGE
-}
-
-function instantiateGmetadConf()
-{
-  # gmetad utility library.
-  source ./gmetadLib.sh;
-
-  generateGmetadConf > ${GMETAD_CONF_FILE};
-}
-
-function instantiateGmondConf()
-{
-  # gmond utility library.
-  source ./gmondLib.sh;
- 
-  gmondClusterName=${1};
-
-  if [ "x" != "x${gmondClusterName}" ]
-  then
-
-    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
-    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
-    
-    # Always blindly generate the core gmond config - that goes on every box running gmond. 
-    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
-
-    isMasterGmond=${2};
-
-    # Decide whether we want to add on the master or slave gmond config.
-    if [ "0" -eq "${isMasterGmond}" ]
-    then
-      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
-    else
-      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
-    fi
-
-    chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
-
-  else
-    echo "No gmondClusterName passed in, nothing to instantiate";
-  fi
-}
-
-# main()
-
-gmondClusterName=;
-isMasterGmond=0;
-configureGmetad=0;
-owner='root';
-group='root';
-
-while getopts ":c:mto:g:" OPTION
-do
-  case ${OPTION} in
-    c) 
-      gmondClusterName=${OPTARG};
-      ;;
-    m)
-      isMasterGmond=1;
-      ;;
-    t)
-      configureGmetad=1;
-      ;;
-    o)
-      owner=${OPTARG};
-      ;;
-    g)
-      group=${OPTARG};
-      ;;
-    ?)
-      usage;
-      exit 1;
-  esac
-done
-
-# Initialization.
-createDirectory ${GANGLIA_CONF_DIR};
-createDirectory ${GANGLIA_RUNTIME_DIR};
-# So rrdcached can drop its PID files in here.
-chmod a+w ${GANGLIA_RUNTIME_DIR};
-chown ${owner}:${group} ${GANGLIA_CONF_DIR};
-
-if [ -n "${gmondClusterName}" ]
-then
-
-  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
-  if [ "1" -eq "${configureGmetad}" ]
-  then
-    instantiateGmetadConf;
-  else
-    instantiateGmondConf ${gmondClusterName} ${isMasterGmond} ${owner} ${group};
-  fi
-
-elif [ "1" -eq "${configureGmetad}" ]
-then
-  instantiateGmetadConf;
-else
-  usage;
-  exit 2;
-fi

+ 0 - 64
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmetad.sh

@@ -1,64 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
-source ./rrdcachedLib.sh;
-
-# Before starting gmetad, start rrdcached.
-./startRrdcached.sh;
-
-if [ $? -eq 0 ] 
-then
-    gmetadRunningPid=`getGmetadRunningPid`;
-
-    # Only attempt to start gmetad if there's not already one running.
-    if [ -z "${gmetadRunningPid}" ]
-    then
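-        # RRDCACHED_ADDRESS points librrd at the rrdcached socket, so gmetad's
-        # RRD writes go through the cache rather than straight to disk.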
-        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
-
-        for i in `seq 0 5`; do
-          gmetadRunningPid=`getGmetadRunningPid`;
-          if [ -n "${gmetadRunningPid}" ]
-          then
-            break;
-          fi
-          sleep 1;
-        done
-
-        if [ -n "${gmetadRunningPid}" ]
-        then
-            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
-        else
-            echo "Failed to start ${GMETAD_BIN}";
-            exit 1;
-        fi
-    else
-        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
-    fi
-else
-    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
-    exit 2;
-fi

+ 0 - 80
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmond.sh

@@ -1,80 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function startGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-    # Only attempt to start gmond if there's not already one running.
-    if [ -z "${gmondRunningPid}" ]
-    then
-      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-      removeGmondPidFileName ${gmondClusterName};
-      if [ -e "${gmondCoreConfFileName}" ]
-      then 
-        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
-
-        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
-
-        for i in `seq 0 5`; do
-          gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-          if [ -n "${gmondRunningPid}" ]
-          then
-            break;
-          fi
-          sleep 1;
-        done
-  
-        if [ -n "${gmondRunningPid}" ]
-        then
-            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
-        else
-            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
-            exit 1;
-        fi
-      fi 
-    else
-      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so start 
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        startGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just start the one ${gmondClusterName} that was asked for.
-    startGmondForCluster ${gmondClusterName};
-fi

+ 0 - 69
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startRrdcached.sh

@@ -1,69 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only attempt to start rrdcached if there's not already one running.
-if [ -z "${rrdcachedRunningPid}" ]
-then
-    #changed because of a problem puppet had with the nobody user
-    #sudo -u ${GMETAD_USER} ${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
-    #         -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-    #         -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-    #         -b /var/lib/ganglia/rrds -B
-    su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
-             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-             -b ${RRDCACHED_BASE_DIR} -B"
-
-    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for
-    # this, but it sometimes fails to take effect due to a lack of permissions,
-    # so perform the operation explicitly to be super-sure.
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
-
-    # Check to make sure rrdcached actually started up.
-    for i in `seq 0 5`; do
-      rrdcachedRunningPid=`getRrdcachedRunningPid`;
-      if [ -n "${rrdcachedRunningPid}" ]
-        then
-          break;
-      fi
-      sleep 1;
-    done
-
-    if [ -n "${rrdcachedRunningPid}" ]
-    then
-        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
-    else
-        echo "Failed to start ${RRDCACHED_BIN}";
-        exit 1;
-    fi
-else
-    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
-fi

+ 0 - 43
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmetad.sh

@@ -1,43 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${gmetadRunningPid}" ]
-then
-    kill -KILL ${gmetadRunningPid};
-    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
-fi
-
-# Poll again.
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Once we've killed gmetad, there should no longer be a running PID.
-if [ -z "${gmetadRunningPid}" ]
-then
-    # It's safe to stop rrdcached now.
-    ./stopRrdcached.sh;
-fi

+ 0 - 55
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh

@@ -1,55 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function stopGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-    # Only go ahead with the termination if we could find a running PID.
-    if [ -n "${gmondRunningPid}" ]
-    then
-      kill -KILL ${gmondRunningPid};
-      removeGmondPidFileName ${gmondClusterName};
-      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so stop
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        stopGmondForCluster ${gmondClusterName};
-    done
-else
-    stopGmondForCluster ${gmondClusterName};
-fi

+ 0 - 41
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopRrdcached.sh

@@ -1,41 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${rrdcachedRunningPid}" ]
-then
-    kill -TERM ${rrdcachedRunningPid};
-    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
-    # until we're sure it's well and truly dead. 
-    #
-    # Without this, an immediately following startRrdcached.sh won't do
-    # anything, because it still sees this soon-to-die instance alive,
-    # and the net result is that after a few seconds, there's no
-    # ${RRDCACHED_BIN} running on the box anymore.
-    sleep 5;
-    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
-fi 

+ 0 - 28
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/teardownGanglia.sh

@@ -1,28 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh;
-
-# Undo what we did while setting up Ganglia on this box.
-rm -rf ${GANGLIA_CONF_DIR};
-rm -rf ${GANGLIA_RUNTIME_DIR};

+ 0 - 79
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config.pp

@@ -1,79 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::config(
-  $ganglia_server_host = undef,
-  $service_state = $hdp::params::cluster_service_state
-)
-{
- if ($service_state in ['running','installed_and_configured','stopped']) {
-    #TODO: divide into what is needed on server vs what is needed on monitored nodes
-    $shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
-    $shell_files = ['checkGmond.sh','checkRrdcached.sh','gmetadLib.sh','gmondLib.sh','rrdcachedLib.sh' ,'setupGanglia.sh','startGmetad.sh','startGmond.sh','startRrdcached.sh','stopGmetad.sh','stopGmond.sh','stopRrdcached.sh','teardownGanglia.sh']
-
-    hdp::directory_recursive_create { $shell_cmds_dir :
-      owner => root,
-      group => root
-    } 
-
-     hdp-ganglia::config::init_file { ['gmetad','gmond']: }
-
-     hdp-ganglia::config::shell_file { $shell_files: }                       
-
-     hdp-ganglia::config::file { ['gangliaClusters.conf','gangliaEnv.sh','gangliaLib.sh']: 
-       ganglia_server_host => $ganglia_server_host
-     }
- 
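-     # The anchors pin resource ordering: the shell-commands directory is
-     # created before any init, shell or config file is laid down in it.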
-     anchor{'hdp-ganglia::config::begin':} -> Hdp::Directory_recursive_create[$shell_cmds_dir] -> Hdp-ganglia::Config::Shell_file<||> -> anchor{'hdp-ganglia::config::end':}
-     Anchor['hdp-ganglia::config::begin'] -> Hdp-ganglia::Config::Init_file<||> -> Anchor['hdp-ganglia::config::end']
-     Anchor['hdp-ganglia::config::begin'] -> Hdp-ganglia::Config::File<||> -> Anchor['hdp-ganglia::config::end']
-  }
-}
-
-define hdp-ganglia::config::shell_file()
-{
-  file { "${hdp-ganglia::params::ganglia_shell_cmds_dir}/${name}":
-    source => "puppet:///modules/hdp-ganglia/${name}", 
-    mode => '0755'
-  }
-}
-
-define hdp-ganglia::config::init_file()
-{
-  file { "/etc/init.d/hdp-${name}":
-    source => "puppet:///modules/hdp-ganglia/${name}.init", 
-    mode => '0755'
-  }
-}
-
-### config files
-define hdp-ganglia::config::file(
-  $ganglia_server_host = undef
-)
-{
-  hdp::configfile { "${hdp-ganglia::params::ganglia_shell_cmds_dir}/${name}":
-    component           => 'ganglia',
-    owner               => root,
-    group               => root
-  }
-  if ($ganglia_server_host != undef) {
-    Hdp::Configfile<||>{ganglia_server_host => $ganglia_server_host}
-  }
-}

+ 0 - 43
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_daemon.pp

@@ -1,43 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#TODO: the scripts called here should be converted to native puppet
-define hdp-ganglia::config::generate_daemon(
-  $ganglia_service,
-  $role,
-  $owner = 'root',
-  $group = $hdp::params::user_group
-)
-{
-  $shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
-  $cmd = $ganglia_service ? {
-    'gmond'  => $role ? {
-      'server' => "${shell_cmds_dir}/setupGanglia.sh -c ${name} -m -o ${owner} -g ${group}",
-       default =>  "${shell_cmds_dir}/setupGanglia.sh -c ${name} -o ${owner} -g ${group}"
-    },
-    'gmetad' => "${shell_cmds_dir}/setupGanglia.sh -t -o ${owner} -g ${group}",
-     default => hdp_fail("Unexpected ganglia service: ${ganglia_service}")
-  }
-
-  #TODO: put in test condition
-  hdp::exec { $cmd:
-    command => $cmd
- }
-}

+ 0 - 36
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmetad/service_check.pp

@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::hdp-gmetad::service_check() 
-{
-  
-  anchor { 'hdp-ganglia::hdp-gmetad::service_check::begin':}
-
-  exec { 'hdp-gmetad':
-    command   => "/etc/init.d/hdp-gmetad status | grep -v failed",
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    before      => Anchor['hdp-ganglia::hdp-gmetad::service_check::end'],
-    logoutput => "true"
-  }
-
-  anchor{ 'hdp-ganglia::hdp-gmetad::service_check::end':}
-}

+ 0 - 36
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmond/service_check.pp

@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::hdp-gmond::service_check() 
-{
-  
-  anchor { 'hdp-ganglia::hdp-gmond::service_check::begin':}
-
-  exec { 'hdp-gmond':
-    command   => "/etc/init.d/hdp-gmond status | grep -v failed",
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    before      => Anchor['hdp-ganglia::hdp-gmond::service_check::end'],
-    logoutput => "true"
-  }
-
-  anchor{ 'hdp-ganglia::hdp-gmond::service_check::end':}
-}

+ 0 - 53
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/init.pp

@@ -1,53 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia(
-  $service_state
-)
-{
-  if ! ($service_state in ['no_op', 'uninstalled']) {
-    include hdp-ganglia::params
-    $gmetad_user = $hdp-ganglia::params::gmetad_user
-    $gmond_user = $hdp-ganglia::params::gmond_user
-
-    hdp::group { 'gmetad_group' :
-      group_name => $gmetad_user,
-    }
-
-    hdp::group { 'gmond_group':
-      group_name => $gmond_user,
-    }
-
-    hdp::user { 'gmond_user': 
-      user_name =>  $gmond_user,
-      gid    => $gmond_user,
-      groups => ["$gmond_user"]
-    }
-  
-    hdp::user { 'gmetad_user':
-      user_name => $gmetad_user,
-      gid    => $gmetad_user,
-      groups => ["$gmetad_user"]
-    }
-
-    anchor{'hdp-ganglia::begin':} -> Hdp::Group<|title == 'gmond_group' or title == 'gmetad_group'|> -> Hdp::User['gmond_user'] -> Hdp::User['gmetad_user'] ->  anchor{'hdp-ganglia::end':}
-  }
-}
-

+ 0 - 165
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp

@@ -1,165 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::monitor(
-  $service_state = $hdp::params::cluster_service_state,
-  $ganglia_server_host = undef,
-  $opts = {}
-) inherits hdp-ganglia::params
-{
-  if  ($service_state == 'no_op') {
-  } elsif ($service_state == 'uninstalled') {     
-
-   hdp::package { 'ganglia-monitor':         
-       ensure      => 'uninstalled', 
-      java_needed => false      
-   }
-
-  } else {
-    if ($hdp::params::service_exists['hdp-ganglia::server'] != true) {
-      class { 'hdp-ganglia':
-       service_state => $service_state
-      }
-    }
-
-    hdp::package { 'ganglia-monitor': }
-
-    hdp::package { 'ganglia-gmond-modules-python': }
-
-    if ($hdp::params::service_exists['hdp-ganglia::server'] != true) {
-      class { 'hdp-ganglia::config': ganglia_server_host => $ganglia_server_host}
-    }
-
-    if (($hdp::params::service_exists['hdp-hadoop::datanode'] == true) or ($hdp::params::service_exists['hdp-hadoop::namenode'] == true) or ($hdp::params::service_exists['hdp-hadoop::jobtracker'] == true) or ($hdp::params::service_exists['hdp-hadoop::tasktracker'] == true) or ($hdp::params::service_exists['hdp-yarn::resourcemanager'] == true) or ($hdp::params::service_exists['hdp-yarn::nodemanager'] == true) or ($hdp::params::service_exists['hdp-yarn::historyserver'] == true) or ($hdp::params::service_exists['hdp-hadoop::client'] == true) or ($hdp::params::service_exists['hdp-hadoop::snamenode'] == true)) {
-     class { 'hdp-hadoop::enable-ganglia': }
-   }
-
-    if ($service_exists['hdp-hbase::master'] == true) {
-      class { 'hdp-hbase::master::enable-ganglia': }
-    }
-  
-    if ($service_exists['hdp-hbase::regionserver'] == true) {
-      class { 'hdp-hbase::regionserver::enable-ganglia': }
-    }
-
-    class { 'hdp-ganglia::monitor::config-gen': }
-  
-    class { 'hdp-ganglia::monitor::gmond': ensure => $service_state}
-
-    class { 'hdp-ganglia::monitor::ownership': }
-
-    if ($hdp::params::service_exists['hdp-ganglia::server'] != true) {
-      Class['hdp-ganglia'] -> Hdp::Package['ganglia-monitor'] -> Hdp::Package['ganglia-gmond-modules-python'] -> Class['hdp-ganglia::config'] -> 
-        Class['hdp-ganglia::monitor::config-gen'] -> Class['hdp-ganglia::monitor::ownership'] ->
-        Class['hdp-ganglia::monitor::gmond']
-    } else {
-      Hdp::Package['ganglia-monitor'] -> Hdp::Package['ganglia-gmond-modules-python'] -> Class['hdp-ganglia::monitor::config-gen'] ->
-        Class['hdp-ganglia::monitor::ownership'] -> Class['hdp-ganglia::monitor::gmond']
-    }
-  }
-}
-
-
-class hdp-ganglia::monitor::config-gen()
-{
-
-  $service_exists = $hdp::params::service_exists
-
-  if ($hdp::params::is_namenode_master) {
-    hdp-ganglia::config::generate_daemon { 'HDPNameNode':}
-  }
-  if ($hdp::params::is_jtnode_master) {
-    hdp-ganglia::config::generate_daemon { 'HDPJobTracker':}
-  }
-  if ($hdp::params::is_rmnode_master) {
-    hdp-ganglia::config::generate_daemon { 'HDPResourceManager':}
-  }
-  if ($hdp::params::is_hsnode_master) {
-    hdp-ganglia::config::generate_daemon { 'HDPHistoryServer':}
-  }
-  if ($hdp::params::is_hbase_master) {
-    hdp-ganglia::config::generate_daemon { 'HDPHBaseMaster':}
-  }
-  
-  if (($hdp::params::is_slave == true) 
-    or (($hdp::params::is_namenode_master == false) 
-      and ($hdp::params::is_jtnode_master == false) 
-      and ($hdp::params::is_rmnode_master == false) 
-      and ($hdp::params::is_hsnode_master == false) 
-      and ($hdp::params::is_hbase_master ==  false))) {
-    hdp-ganglia::config::generate_daemon { 'HDPSlaves':}
-  }
-
-  Hdp-ganglia::Config::Generate_daemon<||>{
-    ganglia_service => 'gmond',
-    role => 'monitor'
-  }
-   # 
-  anchor{'hdp-ganglia::monitor::config-gen::begin':} -> Hdp-ganglia::Config::Generate_daemon<||> -> anchor{'hdp-ganglia::monitor::config-gen::end':}
-}
-
-class hdp-ganglia::monitor::gmond(
-  $ensure
-  )
-{
-  if ($ensure == 'running') {
-    class { 'hdp-ganglia::server::delete_default_gmond_process': }
-    $command = "service hdp-gmond start >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"
-   } elsif  ($ensure == 'stopped') {
-    $command = "service hdp-gmond stop >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"
-  }
-  if ($ensure == 'running' or $ensure == 'stopped') {
-    hdp::exec { "hdp-gmond service" :
-      command => $command,
-      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-    }
-  }
-}
-
-class hdp-ganglia::monitor::ownership() {
-
-  file { "${hdp-ganglia::params::ganglia_dir}/conf.d":
-    owner  => 'root',
-    group  => $hdp::params::user_group
-  }
-
-  file { "${hdp-ganglia::params::ganglia_dir}/conf.d/modgstatus.conf":
-    owner => 'root',
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-ganglia::params::ganglia_dir}/conf.d/multicpu.conf":
-    owner => 'root',
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-ganglia::params::ganglia_dir}/gmond.conf":
-    owner => 'root',
-    group => $hdp::params::user_group
-  }
-}
-
-class hdp-ganglia::server::delete_default_gmond_process() {
-  hdp::exec { "delete_default_gmond_process" :
-    command => "chkconfig gmond off",
-    path => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    require => Class['hdp-ganglia::monitor::gmond']
-  }
-}

+ 0 - 79
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor_and_server.pp

@@ -1,79 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::monitor_and_server(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-ganglia::params
-{
-  $ganglia_shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
-  $ganglia_conf_dir = $hdp-ganglia::params::ganglia_conf_dir
-  $ganglia_runtime_dir = $hdp-ganglia::params::ganglia_runtime_dir
-
-  #note: includes the common package ganglia-monitor
-  class { 'hdp-ganglia':
-    service_state => $service_state
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['uninstalled']) {
-    class { 'hdp-ganglia::server::packages':
-      ensure => 'uninstalled'
-      }
-
-    hdp::directory { [$ganglia_conf_dir,$ganglia_runtime_dir]:
-      service_state => $service_state,
-      force => true
-    }
-    
-    class { 'hdp-ganglia::config':
-      service_state => $service_state
-    }
-
-    Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> 
-      Hdp::Directory[$ganglia_conf_dir] -> Hdp::Directory[$ganglia_runtime_dir] ->
-      Class['hdp-ganglia::config']
-  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
-    class { 'hdp-ganglia::server::packages': }
-
-    class { 'hdp-ganglia::config': 
-     ganglia_server_host => $hdp::params::host_address,
-     service_state       => $service_state
-     }
-
-    class {'hdp-ganglia::monitor::config-gen': }      
-    
-    
-    hdp-ganglia::config::generate_daemon { 'gmetad':
-      ganglia_service => 'gmetad'
-    }
-
-    class { 'hdp-ganglia::service::change_permission':
-      ensure => $service_state
-    }
-
-    #top level no anchors needed
-    Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> Class['hdp-ganglia::config'] -> 
-      Class['hdp-ganglia::monitor::config-gen'] -> Hdp-ganglia::Config::Generate_daemon['gmetad'] ->
-      Class['hdp-ganglia::service::change_permission']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

+ 0 - 35
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/params.pp

@@ -1,35 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::params() inherits hdp::params
-{
-  $ganglia_conf_dir = "/etc/ganglia/hdp"
-  $ganglia_dir = "/etc/ganglia"
-  $ganglia_runtime_dir = "/var/run/ganglia/hdp"
-
-  $ganglia_shell_cmds_dir = hdp_default("ganglia_shell_cmd_dir","/usr/libexec/hdp/ganglia")
-  
-  $gmetad_user = $hdp::params::gmetad_user
-  $gmond_user = $hdp::params::gmond_user
-
-  $webserver_group = hdp_default("webserver_group","apache")
-  $rrdcached_default_base_dir = "/var/lib/ganglia/rrds"
-  $rrdcached_base_dir = hdp_default("rrdcached_base_dir", "/var/lib/ganglia/rrds")
-}

+ 0 - 259
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp

@@ -1,259 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::server(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits  hdp-ganglia::params
-{
-  $hdp::params::service_exists['hdp-ganglia::server'] = true
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state == 'uninstalled') {
-
-   class { 'hdp-ganglia::server::packages':
-      ensure => 'uninstalled',
-      service_state => $service_state
-   }
-
-   class { 'hdp-ganglia::server::files':
-      ensure => 'absent'
-   }
-
-  } else {
-  class { 'hdp-ganglia':
-    service_state => $service_state
-  }
-
-  class { 'hdp-ganglia::server::packages':
-    ensure => 'present',
-    service_state => $service_state
-  }
-
-  class { 'hdp-ganglia::config': 
-    ganglia_server_host => $hdp::params::host_address,
-    service_state       => $service_state 
-  }
-
-  if ($hdp::params::has_namenodes) {
-    hdp-ganglia::config::generate_daemon { 'HDPNameNode':
-      ganglia_service => 'gmond',
-      role => 'server'
-    }
-  }
-  
-  if ($hdp::params::has_jobtracker) {
-    hdp-ganglia::config::generate_daemon { 'HDPJobTracker':
-      ganglia_service => 'gmond',
-      role => 'server'
-    }
-  }
-  
-  if ($hdp::params::has_hbase_masters) {
-    hdp-ganglia::config::generate_daemon { 'HDPHBaseMaster':
-      ganglia_service => 'gmond',
-      role => 'server'
-    }
-  }
-
-  if ($hdp::params::has_resourcemanager) {
-    hdp-ganglia::config::generate_daemon { 'HDPResourceManager':
-      ganglia_service => 'gmond',
-      role => 'server'
-    }
-  }
-  
-  if ($hdp::params::has_histroryserver) {
-    hdp-ganglia::config::generate_daemon { 'HDPHistoryServer':
-      ganglia_service => 'gmond',
-      role => 'server'
-    }
-  }
-
-  hdp-ganglia::config::generate_daemon { 'HDPSlaves':
-    ganglia_service => 'gmond',
-    role => 'server'
-  }
-
-  hdp-ganglia::config::generate_daemon { 'gmetad':
-    ganglia_service => 'gmetad',
-    role => 'server'
-  }
-
-  class { 'hdp-ganglia::server::gmetad': ensure => $service_state}
-
-  class { 'hdp-ganglia::service::change_permission': ensure => $service_state }
-
-  if ($service_state == 'installed_and_configured') {
-    $webserver_state = 'restart'
-  } elsif ($service_state == 'running') {
-    $webserver_state = 'running'
-  } else {
-    # We are never stopping httpd
-    #$webserver_state = $service_state
-  }
-
-  class { 'hdp-monitor-webserver': service_state => $webserver_state}
-
-  class { 'hdp-ganglia::server::files':
-     ensure => 'present'
-  }
-
-  file { "${hdp-ganglia::params::ganglia_dir}/gmetad.conf":
-    owner => 'root',
-    group => $hdp::params::user_group
-  }
-
-  #top level does not need anchors
-  Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> Class['hdp-ganglia::config'] ->
-    Hdp-ganglia::Config::Generate_daemon<||> ->
-    File["${hdp-ganglia::params::ganglia_dir}/gmetad.conf"] -> Class['hdp-ganglia::service::change_permission'] ->
-    Class['hdp-ganglia::server::files'] -> Class['hdp-ganglia::server::gmetad'] -> Class['hdp-monitor-webserver']
- }
-}
-
-class hdp-ganglia::server::packages(
-  $ensure = present,
-  $service_state = 'installed_and_configured'
-)
-{
-  hdp::package { ['libganglia','ganglia-devel','ganglia-server','ganglia-web']: 
-    ensure      => $ensure,
-    java_needed => false,
-    require => Hdp::Package ['rrdtool-python']
-  }
-
-  # Removing conflicting packages only once, to work around the "/bin/rpm -e absent-absent-absent.absent" bug (BUG-2881)
-  if ($service_state == 'installed_and_configured' and $hdp::params::hdp_os_type == 'centos5') {
-    # Remove conflicting 32bit package
-    hdp::package { ['rrdtool-devel']:
-      ensure      => 'absent',
-      java_needed => false,
-      before => Hdp::Package ['rrdtool']
-    }
-
-    # Remove conflicting 32bit package
-    hdp::package { ['rrdtool']:
-      ensure      => 'absent',
-      java_needed => false,
-      before => Hdp::Package ['rrdtool-python']
-    }
-  }
-
-  hdp::package { ['rrdtool-python']:
-    ensure      => $ensure,
-    java_needed => false
-  }
-
-}
-
-class hdp-ganglia::server::files(
-  $ensure = present 
-)
-{
-  $rrd_py_path = $hdp::params::rrd_py_path [$hdp::params::hdp_os_type]
-  hdp::directory_recursive_create{$rrd_py_path:
-    ensure => "directory", 
-    override_owner => false 
-  }
-
-  $rrd_py_file_path = "${rrd_py_path}/rrd.py"
-
-  file{$rrd_py_file_path :
-    ensure => $ensure,
-    source => "puppet:///modules/hdp-ganglia/rrd.py",
-    mode   => '0755'
-  }
-
-  anchor{ 'hdp-ganglia::server::files::begin' : } -> Hdp::Directory_recursive_create[$rrd_py_path] -> File[$rrd_py_file_path] -> anchor{ 'hdp-ganglia::server::files::end' : }
-
-  $rrd_files_dir = $hdp-ganglia::params::rrdcached_base_dir
-  $rrd_file_owner = $hdp-ganglia::params::gmetad_user
-  $rrdcached_default_file_dir = $hdp-ganglia::params::rrdcached_default_base_dir
-
-  ## If the directory is different from the default, make sure it exists
-  if ($rrdcached_default_file_dir != $rrd_files_dir) {
-    hdp::directory_recursive_create{ $rrd_files_dir :
-      ensure => "directory",
-      owner => $rrd_file_owner,
-      group => $rrd_file_owner,
-      mode => '0755'
-    }
-
-    file { $rrdcached_default_file_dir :
-      ensure => link,
-      target => $rrd_files_dir,
-      force => true
-    }
-
-    File[$rrd_py_file_path] -> Hdp::Directory_recursive_create[$rrd_files_dir] -> File[$rrdcached_default_file_dir] -> Anchor['hdp-ganglia::server::files::end']
-  }
-  elsif ($rrd_file_owner != $hdp::params::NOBODY_USER) {
-    # The owner of rrdcached_default_file_dir is 'nobody' by default;
-    # change it to gmetad_user so the gmetad service can start properly
-    
-    hdp::directory { $rrdcached_default_file_dir:
-      owner => $rrd_file_owner,
-      group => $rrd_file_owner,
-      override_owner => true
-    }
-    
-    File[$rrd_py_file_path] -> Hdp::Directory[$rrdcached_default_file_dir] -> Anchor['hdp-ganglia::server::files::end']
-  }
-}
-
-
-class hdp-ganglia::service::change_permission(
-  $ensure
-)
-{
-  if ($ensure == 'running' or $ensure == 'installed_and_configured') {
-    hdp::directory_recursive_create { '/var/lib/ganglia/dwoo' :
-      mode => '0777',
-      owner => $hdp-ganglia::params::gmetad_user
-    }
-  }
-}
-
-class hdp-ganglia::server::gmetad(
-  $ensure
-)
-{
-  if ($ensure == 'running') {
-    class { 'hdp-ganglia::server::delete_default_gmetad_process': }
-    $command = "service hdp-gmetad start >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
-   } elsif  ($ensure == 'stopped') {
-    $command = "service hdp-gmetad stop >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
-  }
-  if ($ensure == 'running' or $ensure == 'stopped') {
-    hdp::exec { "hdp-gmetad service" :
-      command => "$command",
-      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-    }
-  }
-}
-
-class hdp-ganglia::server::delete_default_gmetad_process() {
-  hdp::exec { "delete_default_gmetad_process" :
-    command => "chkconfig gmetad off",
-    path => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    require => Class['hdp-ganglia::server::gmetad']
-  }
-}

+ 0 - 43
ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaClusters.conf.erb

@@ -1,43 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#########################################################
-### ClusterName           GmondMasterHost   GmondPort ###
-#########################################################
-
-    HDPSlaves       	<%=scope.function_hdp_host("ganglia_server_host")%>  8660
-
-<% if (scope.function_hdp_default('namenode_host') != '')%>
-    HDPNameNode         <%=scope.function_hdp_host("ganglia_server_host")%>  8661
-<%end-%>
-<% if (scope.function_hdp_get_major_stack_version([scope.function_hdp_template_var("stack_version")]) < 2)%>
-    <% if (scope.function_hdp_default('jtnode_host') != '')%>
-    HDPJobTracker     	<%=scope.function_hdp_host("ganglia_server_host")%>  8662
-    <%end-%>
-<%end-%>
-<% if (scope.function_hdp_default('hbase_master_hosts') != '')%>
-    HDPHBaseMaster      <%=scope.function_hdp_host("ganglia_server_host")%>  8663
-<%end-%>
-<% if (scope.function_hdp_get_major_stack_version([scope.function_hdp_template_var("stack_version")]) >= 2)%>
-    <% if (scope.function_hdp_default('rm_host') != '')%>
-    HDPResourceManager  <%=scope.function_hdp_host("ganglia_server_host")%>  8664
-    <%end-%>
-    <% if (scope.function_hdp_default('hs_host') != '')%>
-    HDPHistoryServer    <%=scope.function_hdp_host("ganglia_server_host")%>  8666
-    <%end-%>
-<%end-%>
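For illustration, rendered against a hypothetical cluster with a Ganglia server at ganglia.example.com, this template yields entries such as the following (which lines appear depends on the cluster topology and stack version):

    HDPSlaves           ganglia.example.com  8660
    HDPNameNode         ganglia.example.com  8661
    HDPHBaseMaster      ganglia.example.com  8663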

+ 0 - 24
ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaEnv.sh.erb

@@ -1,24 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Unix users and groups for the binaries we start up.
-GMETAD_USER=<%=scope.function_hdp_template_var("gmetad_user")%>;
-GMOND_USER=<%=scope.function_hdp_template_var("gmond_user")%>;
-WEBSERVER_GROUP=<%=scope.function_hdp_template_var("webserver_group")%>;

+ 0 - 62
ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaLib.sh.erb

@@ -1,62 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-GANGLIA_CONF_DIR=<%=scope.function_hdp_template_var("ganglia_conf_dir")%>;
-GANGLIA_RUNTIME_DIR=<%=scope.function_hdp_template_var("ganglia_runtime_dir")%>;
-RRDCACHED_BASE_DIR=<%=scope.function_hdp_template_var("rrdcached_base_dir")%>;
-
-# This file contains all the info about each Ganglia Cluster in our Grid.
-GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
-
-function createDirectory()
-{
-    directoryPath=${1};
-
-    if [ "x" != "x${directoryPath}" ]
-    then
-        mkdir -p ${directoryPath};
-    fi
-}
-
-function getGangliaClusterInfo()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    else
-        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    fi
-}
-
-function getConfiguredGangliaClusterNames()
-{
-  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
-  # the subdirectory name from each.
-  if [ -e ${GANGLIA_CONF_DIR} ]
-  then  
-    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
-  fi
-}
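A hypothetical shell session against a rendered gangliaClusters.conf like the one above (output depends on the actual file and conf directory contents):

    $ getGangliaClusterInfo HDPSlaves
    HDPSlaves           ganglia.example.com  8660
    $ getConfiguredGangliaClusterNames
    HDPSlaves
    HDPNameNode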

+ 0 - 62
ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkForFormat.sh

@@ -1,62 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-
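A hypothetical invocation of this helper (argument order: HDFS user, Hadoop conf dir, marker dir, then comma-separated NameNode name dirs; all paths are illustrative):

    sh checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode-formatted /hadoop/hdfs/namenode

The NameNode is formatted only when every listed name dir is empty and the marker dir does not yet exist.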

+ 0 - 53
ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkWebUI.py

@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import httplib
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options]")
-  parser.add_option("-m", "--hosts", dest="hosts", help="Comma-separated list of hosts whose web UI availability should be checked")
-  parser.add_option("-p", "--port", dest="port", help="Port of the web UI to check for availability")
-
-  (options, args) = parser.parse_args()
-  
-  hosts = options.hosts.split(',')
-  port = options.port
-
-  for host in hosts:
-    try:
-      conn = httplib.HTTPConnection(host, port)
-      # This can be modified to get a partial url part to be sent with request
-      conn.request("GET", "/")
-      httpCode = conn.getresponse().status
-      conn.close()
-    except Exception:
-      httpCode = 404
-
-    if httpCode != 200:
-      print "Cannot access WEB UI on: http://" + host + ":" + port
-      exit(1)
-      
-
-if __name__ == "__main__":
-  main()
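A hypothetical smoke-test invocation (the hosts and the conventional NameNode web UI port 50070 are illustrative):

    python checkWebUI.py -m namenode1.example.com,namenode2.example.com -p 50070

The script exits non-zero as soon as any host fails to answer GET / with HTTP 200.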

+ 0 - 132
ambari-agent/src/main/puppet/modules/hdp-hadoop/files/task-log4j.properties

@@ -1,132 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-

+ 0 - 65
ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb

@@ -1,65 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# Given an HDFS user directory, return the highest (most permissive) mode configured for it across the services sharing that directory
-module Puppet::Parser::Functions
-  newfunction(:hdp_hadoop_get_mode, :type => :rvalue) do |args|
-  
-    dir = args[0]
-
-    oozie_dir = lookupvar("::hdp::params::oozie_hdfs_user_dir")
-    oozie_dir_mode = lookupvar("::hdp::params::oozie_hdfs_user_mode") 
-    
-    hcat_dir = lookupvar("::hdp::params::hcat_hdfs_user_dir")
-    hcat_dir_mode = lookupvar("::hdp::params::hcat_hdfs_user_mode") 
-    
-    webhcat_dir = lookupvar("::hdp::params::webhcat_hdfs_user_dir")
-    webhcat_dir_mode = lookupvar("::hdp::params::webhcat_hdfs_user_mode") 
-    
-    hive_dir = lookupvar("::hdp::params::hive_hdfs_user_dir")
-    hive_dir_mode = lookupvar("::hdp::params::hive_hdfs_user_mode") 
-    
-    smoke_dir = lookupvar("::hdp::params::smoke_hdfs_user_dir")
-    smoke_dir_mode = lookupvar("::hdp::params::smoke_hdfs_user_mode") 
-    
-    modes = []
-    modes.push({:dir => oozie_dir, :mode => oozie_dir_mode})
-    modes.push({:dir => hcat_dir, :mode => hcat_dir_mode})
-    modes.push({:dir => webhcat_dir, :mode => webhcat_dir_mode})
-    modes.push({:dir => hive_dir, :mode => hive_dir_mode})
-    modes.push({:dir => smoke_dir, :mode => smoke_dir_mode})
-
-    modes_grouped = {}
-    modes.each do |item|
-      if modes_grouped[item[:dir]].nil?
-        modes_grouped[item[:dir]]=[]
-      end
-      modes_grouped[item[:dir]]=modes_grouped[item[:dir]] + [(item[:mode])]
-    end
-
-    modes_max = {}
-    
-    modes_grouped.each_key do |key|
-      modes_max[key] = modes_grouped[key].max
-    end
-
-    modes_max[dir]
-  end
-end

+ 0 - 47
ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_namenode_id.rb

@@ -1,47 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# Resolve this host's NameNode service id in an HA setup
-
-module Puppet::Parser::Functions
-  newfunction(:hdp_hadoop_get_namenode_id, :type => :rvalue) do |args|
-    namenode_id = ""
-    if args.length > 1
-      # Get hdfs-site to lookup hostname properties
-      lookup_property = args[0]
-      siteName = args[1]
-      siteConfig = lookupvar("#{siteName}")
-      nn_ids_str = lookupvar("::hdp::params::dfs_ha_namenode_ids")
-      hostname = lookupvar("::hdp::params::hostname")
-      nn_ids = nn_ids_str.to_s.split(',')
-
-      if nn_ids.length > 1
-        nn_ids.each do |id|
-          lookup_key = lookup_property + "." + id.to_s.strip
-          property_val = siteConfig.fetch(lookup_key, "")
-          if property_val != "" and property_val.include? hostname
-            namenode_id = id
-          end
-        end
-      end
-    end
-    namenode_id.strip
-  end
-end

+ 0 - 51
ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb

@@ -1,51 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# Given an HDFS user directory, return the user that should own it
-module Puppet::Parser::Functions
-  newfunction(:hdp_hadoop_get_owner, :type => :rvalue) do |args|
-  
-    dir = args[0]
-    
-    oozie_dir = lookupvar("::hdp::params::oozie_hdfs_user_dir")
-    oozie_user = lookupvar("::hdp::params::oozie_user") 
-
-    hcat_dir = lookupvar("::hdp::params::hcat_hdfs_user_dir")
-    hcat_user = lookupvar("::hdp::params::hcat_user") 
-
-    webhcat_dir = lookupvar("::hdp::params::webhcat_hdfs_user_dir")
-    webhcat_user = lookupvar("::hdp::params::webhcat_user") 
-
-    hive_dir = lookupvar("::hdp::params::hive_hdfs_user_dir")
-    hive_user = lookupvar("::hdp::params::hive_user") 
-
-    smoke_dir = lookupvar("::hdp::params::smoke_hdfs_user_dir")
-    smoke_user = lookupvar("::hdp::params::smokeuser") 
-
-    dirs_to_owners = {}
-    dirs_to_owners[oozie_dir] = oozie_user
-    dirs_to_owners[hcat_dir] = hcat_user
-    dirs_to_owners[webhcat_dir] = webhcat_user
-    dirs_to_owners[hive_dir] = hive_user
-    dirs_to_owners[smoke_dir] = smoke_user
-
-    dirs_to_owners[dir]
-  end
-end

+ 0 - 56
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp

@@ -1,56 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::client(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::client'] = true
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-
-  if ($hdp::params::use_32_bits_on_slaves == true) {
-    Hdp-hadoop::Package<||>{include_32_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 32}
-  } else {
-    Hdp-hadoop::Package<||>{include_64_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 64}
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['installed_and_configured','uninstalled']) {
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'hadoop_client_ambari_qa_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/${smokeuser}.headless.keytab",
-        keytabfile => "${smokeuser}.headless.keytab",
-        owner => $smokeuser,
-        hostnameInPrincipals => 'no'
-      }
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

+ 0 - 100
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp

@@ -1,100 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::datanode(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params 
-{
-
-  $hdp::params::service_exists['hdp-hadoop::datanode'] = true
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-
-  if ($hdp::params::use_32_bits_on_slaves == true) {
-    Hdp-hadoop::Package<||>{include_32_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 32}
-  } else {
-    Hdp-hadoop::Package<||>{include_64_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 64}
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-    $dfs_data_dir = $hdp-hadoop::params::dfs_data_dir
-  
-    if (($hdp::params::service_exists['hdp-hadoop::namenode'] == true) or ($hdp::params::service_exists['hdp-hadoop::snamenode'] == true)){
-      $a_namenode_on_node = true
-    } else {
-      $a_namenode_on_node = false
-    }
-
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'datanode_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/dn.service.keytab",
-        keytabfile => 'dn.service.keytab',
-        owner => $hdp-hadoop::params::hdfs_user
-      }
-    }
-
-  
-    hdp-hadoop::datanode::create_data_dirs { $dfs_data_dir: 
-      service_state => $service_state
-    }
-
-    if ($a_namenode_on_node == true){
-      $create_pid_dir = false
-      $create_log_dir = false
-    } else {
-      $create_pid_dir = true
-      $create_log_dir = true
-    }
-    
-    hdp-hadoop::service{ 'datanode':
-      ensure         => $service_state,
-      user           => $hdp-hadoop::params::hdfs_user,
-      create_pid_dir => $create_pid_dir,
-      create_log_dir => $create_log_dir
-    }
-    
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Datanode::Create_data_dirs<||> -> Hdp-hadoop::Service['datanode'] -> Anchor['hdp-hadoop::end'] 
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::datanode::create_data_dirs($service_state)
-{
-  $dirs = hdp_array_from_comma_list($name)
-  hdp::directory_recursive_create_ignore_failure { $dirs :
-    owner => $hdp-hadoop::params::hdfs_user,
-    mode => '0750',
-    service_state => $service_state,
-    force => true
-  }
-
-}

+ 0 - 36
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs.pp

@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::glusterfs_client(
-  $service_state = $hdp::params::cluster_client_state,
-  $opts = {}
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::glusterfs_client'] = true
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-  
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
-  	#adds package, users and directories, and common hadoop configs
-  	include hdp-hadoop::initialize
-  }
-}

+ 0 - 26
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs_service_check.pp

@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::glusterfs_service_check(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::glusterfs'] = true
-}

+ 0 - 84
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp

@@ -1,84 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp-hadoop::hdfs::copyfromlocal(
-  $service_state,
-  $owner = unset,
-  $group = unset,
-  $recursive_chown = false,
-  $mode = undef,
-  $recursive_chmod = false,
-  $dest_dir = undef,
-  $kinit_if_needed = undef
-) 
-{
- 
-  if ($service_state == 'running') {
-    $copy_cmd = "fs -copyFromLocal ${name} ${dest_dir}"
-    if ($kinit_if_needed == undef) {
-      $unless_cmd = "hadoop fs -ls ${dest_dir} >/dev/null 2>&1"
-    } else {
-      $unless_cmd = "${kinit_if_needed} hadoop fs -ls ${dest_dir} >/dev/null 2>&1"
-    }
-    ## exec-hadoop runs a kinit for the given user, but the 'unless' check does not
-    hdp-hadoop::exec-hadoop { $copy_cmd:
-      command => $copy_cmd,
-      unless => $unless_cmd,
-      user => $owner
-    }
-    if ($owner == unset) {
-      $chown = ""
-    } else {
-      if ($group == unset) {
-        $chown = $owner
-      } else {
-        $chown = "${owner}:${group}"
-     } 
-    }  
- 
-    if (chown != "") {
-      #TODO: see if there is a good 'unless test'
-      if ($recursive_chown == true) {
-        $chown_cmd = "fs -chown -R ${chown} ${dest_dir}"
-      } else {
-        $chown_cmd = "fs -chown ${chown} ${dest_dir}"
-      }
-      hdp-hadoop::exec-hadoop {$chown_cmd :
-        command => $chown_cmd,
-        user => $owner
-      }
-      Hdp-hadoop::Exec-hadoop[$copy_cmd] -> Hdp-hadoop::Exec-hadoop[$chown_cmd]
-    }
-  
-    if ($mode != undef) {
-      #TODO: see if there is a good 'unless test'
-      if ($recursive_chmod == true) {
-        $chmod_cmd = "fs -chmod -R ${mode} ${dest_dir}"
-      } else {
-        $chmod_cmd = "fs -chmod ${mode} ${dest_dir}"
-      }
-      hdp-hadoop::exec-hadoop {$chmod_cmd :
-        command => $chmod_cmd,
-        user => $owner
-      }
-      Hdp-hadoop::Exec-hadoop[$copy_cmd] -> Hdp-hadoop::Exec-hadoop[$chmod_cmd]
-    }
-  }       
-}
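The define chains up to three hadoop invocations; a condensed sketch of the equivalent shell sequence, with hypothetical source and destination paths:

    hadoop fs -copyFromLocal /usr/share/oozie/oozie-sharelib.tar.gz /user/oozie/share
    hadoop fs -chown -R oozie:hadoop /user/oozie/share
    hadoop fs -chmod -R 755 /user/oozie/share

Only the copy is guarded by an 'unless' existence check; the chown and chmod are re-applied on every run.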

+ 0 - 49
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp

@@ -1,49 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::hdfs::decommission(
-) inherits hdp-hadoop::params
-{
-  if hdp_is_empty($configuration[hdfs-site]['dfs.hosts.exclude']) {
-    hdp_fail("There is no path to exclude file in configuration!")
-  }
-
-  $kinit_path = $hdp::params::kinit_path_local
-  $keytab_path = $hdp::params::hdfs_user_keytab
-  $hdfs_user = $hdp::params::hdfs_user
-  $kinit_cmd = "su - ${hdfs_user} -c '${kinit_path} -kt ${keytab_path} ${hdfs_user}'"
-
-  if ($hdp::params::security_enabled == true) {
-    exec { 'kinit_before_decommission' :
-      command => $kinit_cmd,
-      path => ['/bin'],
-      before => Hdp-Hadoop::Hdfs::Generate_Exclude_File['exclude_file']
-    }
-  }
-
-  hdp-hadoop::hdfs::generate_exclude_file{'exclude_file':}
-
-  hdp::exec{"hadoop dfsadmin -refreshNodes":
-      command => "hadoop dfsadmin -refreshNodes",
-      user => $hdp::params::hdfs_user,
-      require => Hdp-Hadoop::Hdfs::Generate_Exclude_File['exclude_file']
-    }
-  
-}
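The class reduces to a three-step shell flow; a condensed sketch with a hypothetical excluded host, keytab, and exclude-file path (the kinit step applies only on secure clusters):

    su - hdfs -c 'kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs'
    echo dn1.example.com > /etc/hadoop/conf/dfs.exclude
    su - hdfs -c 'hadoop dfsadmin -refreshNodes'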

+ 0 - 121
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp

@@ -1,121 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#TODO: unset should be changed to undef, just to be consistent
-define hdp-hadoop::hdfs::directory(
-  $service_state = 'running',
-  $owner = unset,
-  $group = unset,
-  $recursive_chown = false,
-  $mode = undef,
-  $recursive_chmod = false
-) 
-{
-  $dir_exists = "hadoop fs -ls ${name} >/dev/null 2>&1"
-  $namenode_safe_mode_off = "hadoop dfsadmin -safemode get|grep 'Safe mode is OFF'"
-
-  # Short circuit the expensive dfs client checks if directory was already created
-  $stub_dir = $hdp-hadoop::params::namenode_dirs_created_stub_dir
-  $stub_filename = $hdp-hadoop::params::namenode_dirs_stub_filename
-  $dir_absent_in_stub = "grep -q '^${name}$' ${stub_dir}/${stub_filename} > /dev/null 2>&1; test $? -ne 0"
-  $record_dir_in_stub = "echo '${name}' >> ${stub_dir}/${stub_filename}"
-  $tries = 30
-  $try_sleep = 10
-
-  if ($hdp::params::dfs_ha_enabled == true) {
-     $namenode_id = $hdp-hadoop::params::namenode_id
-     if (hdp_is_empty($namenode_id) == false) {
-       $dfs_check_nn_status_cmd = "hdfs haadmin -getServiceState $namenode_id | grep active > /dev/null"
-     }
-   } else {
-     $dfs_check_nn_status_cmd = "true"
-   }
-
-  if ($service_state == 'running') {
-    if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-      $mkdir_cmd = "fs -mkdir -p ${name}"
-    } else {
-      $mkdir_cmd = "fs -mkdir ${name}"
-    }
-
-    hdp-hadoop::exec-hadoop { $mkdir_cmd:
-      command   => $mkdir_cmd,
-      unless    => "$dir_absent_in_stub && $dfs_check_nn_status_cmd && $dir_exists && ! $namenode_safe_mode_off",
-      onlyif    => "$dir_absent_in_stub && $dfs_check_nn_status_cmd && ! $dir_exists",
-      try_sleep => $try_sleep,
-      tries     => $tries
-    }
-
-    hdp::exec { $record_dir_in_stub:
-      command => $record_dir_in_stub,
-      user => $hdp-hadoop::params::hdfs_user,
-      onlyif => $dir_absent_in_stub
-    }
-
-    Hdp-hadoop::Exec-hadoop[$mkdir_cmd] ->
-    Hdp::Exec[$record_dir_in_stub]
-
-    if ($owner == unset) {
-      $chown = ""
-    } else {
-      if ($group == unset) {
-        $chown = $owner
-      } else {
-        $chown = "${owner}:${group}"
-     } 
-    }  
- 
-    if ($chown != "") {
-      #TODO: see if there is a good 'unless test'
-      if ($recursive_chown == true) {
-        $chown_cmd = "fs -chown -R ${chown} ${name}"
-      } else {
-        $chown_cmd = "fs -chown ${chown} ${name}"
-      }
-      hdp-hadoop::exec-hadoop {$chown_cmd :
-        command   => $chown_cmd,
-        onlyif    => "$dir_absent_in_stub && $dfs_check_nn_status_cmd && $namenode_safe_mode_off && $dir_exists",
-        try_sleep => $try_sleep,
-        tries     => $tries
-      }
-      Hdp-hadoop::Exec-hadoop[$mkdir_cmd] ->
-      Hdp-hadoop::Exec-hadoop[$chown_cmd] ->
-      Hdp::Exec[$record_dir_in_stub]
-    }
-  
-    if ($mode != undef) {
-      #TODO: see if there is a good 'unless test'
-      if ($recursive_chmod == true) {
-        $chmod_cmd = "fs -chmod -R ${mode} ${name}"
-      } else {
-        $chmod_cmd = "fs -chmod ${mode} ${name}"
-      }
-      hdp-hadoop::exec-hadoop { $chmod_cmd :
-        command   => $chmod_cmd,
-        onlyif    => "$dir_absent_in_stub && $dfs_check_nn_status_cmd && $namenode_safe_mode_off && $dir_exists",
-        try_sleep => $try_sleep,
-        tries     => $tries
-      }
-      Hdp-hadoop::Exec-hadoop[$mkdir_cmd] ->
-      Hdp-hadoop::Exec-hadoop[$chmod_cmd] ->
-      Hdp::Exec[$record_dir_in_stub]
-    }
-  }       
-}
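The stub-file short circuit guarding these exec resources reduces to shell logic like the following (the stub path and directory name are hypothetical):

    STUB=/var/lib/hdfs/namenode_dirs_created
    DIR=/user/ambari-qa
    if ! grep -q "^${DIR}$" "$STUB" 2>/dev/null; then
      hadoop fs -mkdir -p "$DIR"    # plus the optional chown/chmod steps
      echo "$DIR" >> "$STUB"        # so later runs skip the dfs round-trip
    fi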

+ 0 - 42
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp

@@ -1,42 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-define hdp-hadoop::hdfs::generate_exclude_file()
-{
-  $exlude_file_path = $configuration['hdfs-site']['dfs.hosts.exclude']
-  ## Generate the exclude file if $configuration['hdfs-exclude-file']['datanodes'] has a value,
-  ## or if the 'datanodes' key is present even with an empty value
-  if (hdp_is_empty($configuration) == false and
-    hdp_is_empty($configuration['hdfs-exclude-file']) == false) and
-    (hdp_is_empty($configuration['hdfs-exclude-file']['datanodes']) == false)
-    or has_key($configuration['hdfs-exclude-file'], 'datanodes') {
-    ## Create the file listing the excluded hosts
-    $exlude_hosts_list = hdp_array_from_comma_list($configuration['hdfs-exclude-file']['datanodes'])
-    file { $exlude_file_path :
-      ensure => file,
-      content => template('hdp-hadoop/exclude_hosts_list.erb')
-    }
-  }
-}
-
-
-
-
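The exclude_hosts_list.erb template itself is not shown in this hunk; a plausible minimal body, assuming $exlude_hosts_list is an array of hostnames (a sketch, not necessarily the shipped template):

    <% exlude_hosts_list.each do |host| -%>
    <%= host %>
    <% end -%>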

+ 0 - 170
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp

@@ -1,170 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::hdfs::service_check()
-{
-  $unique = hdp_unique_id_and_date()
-  $dir = '/tmp'
-  $tmp_file = "${dir}/${unique}"
-
-  $safemode_command = "dfsadmin -safemode get | grep OFF"
-
-  $create_dir_cmd = "fs -mkdir ${dir} ; hadoop fs -chmod -R 777 ${dir}"
-  $test_dir_exists = "hadoop fs -test -e ${dir}" #TODO: may fix up the fact that the test needs an explicit 'hadoop' while the command does not
-  $cleanup_cmd = "fs -rm ${tmp_file}"
-  #cleanup put below to handle retries; if retrying there will be a stale file that needs cleanup; exit code is a function of the second command
-  $create_file_cmd = "${cleanup_cmd}; hadoop fs -put /etc/passwd ${tmp_file}" #TODO: inconsistent that the second command needs 'hadoop'
-  $test_cmd = "fs -test -e ${tmp_file}"
-
-  anchor { 'hdp-hadoop::hdfs::service_check::begin':}
-
-  hdp-hadoop::exec-hadoop { 'hdfs::service_check::check_safemode':
-    command   => $safemode_command,
-    tries     => 20,
-    try_sleep => 15,
-    logoutput => true,
-    user      => $hdp::params::smokeuser,
-    require   => Anchor['hdp-hadoop::hdfs::service_check::begin']
-  }
-
-  hdp-hadoop::exec-hadoop { 'hdfs::service_check::create_dir':
-    command   => $create_dir_cmd,
-    unless    => $test_dir_exists,
-    tries     => 3,
-    try_sleep => 5,
-    user      => $hdp::params::smokeuser,
-    require   => Hdp-hadoop::Exec-hadoop['hdfs::service_check::check_safemode']
-  }
-
-  hdp-hadoop::exec-hadoop { 'hdfs::service_check::create_file':
-    command   => $create_file_cmd,
-    tries     => 3,
-    try_sleep => 5,
-    user      => $hdp::params::smokeuser,
-    require   => Hdp-hadoop::Exec-hadoop['hdfs::service_check::create_dir'],
-    notify    => Hdp-hadoop::Exec-hadoop['hdfs::service_check::test']
-  }
-
-
-   #TODO: put in after testing
- #  hdp-hadoop::exec-hadoop { 'hdfs::service_check::cleanup':
- #   command     => $cleanup_cmd,
- #   refreshonly => true,
- #   require     => Hdp-hadoop::Exec-hadoop['hdfs::service_check::test'],
- #   before      => Anchor['hdp-hadoop::hdfs::service_check::end']
-  #}
-
-  hdp-hadoop::exec-hadoop { 'hdfs::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    user      => $hdp::params::smokeuser,
-    require     => Hdp-hadoop::Exec-hadoop['hdfs::service_check::create_file'],
-    before      => Anchor['hdp-hadoop::hdfs::service_check::journalnode_check:begin']
-  }
-
-  anchor { 'hdp-hadoop::hdfs::service_check::journalnode_check:begin':}
-
-  if hdp_is_empty($hdp::params::journalnode_hosts) {
-    ##No journalnode hosts
-    Anchor['hdp-hadoop::hdfs::service_check::journalnode_check:begin'] ->
-      Anchor['hdp-hadoop::hdfs::service_check::journalnode_check:end']
-
-  } else {
-    ## Cluster has journalnode hosts, run test of journalnodes
-    $journalnode_hosts_comma_sep = hdp_comma_list_from_array($hdp::params::journalnode_hosts)
-    class { 'hdp-hadoop::journalnode::service_check':
-      journalnode_hosts => $journalnode_hosts_comma_sep,
-      require          => Anchor['hdp-hadoop::hdfs::service_check::journalnode_check:begin'],
-      before           => Anchor['hdp-hadoop::hdfs::service_check::journalnode_check:end']
-    }
-  }
-
-  anchor { 'hdp-hadoop::hdfs::service_check::journalnode_check:end':} ->
-    anchor { 'hdp-hadoop::hdfs::service_check::zkfc_check:begin':}
-
-  if hdp_is_empty($hdp::params::zkfc_hosts) {
-    ## No zkfc hosts
-    Anchor['hdp-hadoop::hdfs::service_check::zkfc_check:begin'] ->
-      Anchor['hdp-hadoop::hdfs::service_check::zkfc_check:end']
-  } else {
-    ## Cluster has zkfc hosts; run a test of the local zkfc daemon if the current
-    ## host is the namenode. A namenode without ZKFC installed is also considered
-    ## a misconfiguration.
-    if ($hdp::params::is_namenode_master) {
-      class { 'hdp-hadoop::zkfc::service_check':
-        require          => Anchor['hdp-hadoop::hdfs::service_check::zkfc_check:begin'],
-        before           => Anchor['hdp-hadoop::hdfs::service_check::zkfc_check:end']
-      }
-    }
-  }
-
-  anchor { 'hdp-hadoop::hdfs::service_check::zkfc_check:end':} ->
-    anchor{ 'hdp-hadoop::hdfs::service_check::end':}
-
-}
-
-class hdp-hadoop::journalnode::service_check($journalnode_hosts)
-{
-  $journalnode_port = $hdp::params::journalnode_port
-  $smoke_test_user = $hdp::params::smokeuser
-  
-  $checkWebUIFileName = "checkWebUI.py"
-  $checkWebUIFilePath = "/tmp/$checkWebUIFileName"
-
-  $checkWebUICmd = "su - ${smoke_test_user} -c 'python $checkWebUIFilePath -m $journalnode_hosts -p $journalnode_port'"
-
-  file { $checkWebUIFilePath:
-    ensure => present,
-    source => "puppet:///modules/hdp-hadoop/$checkWebUIFileName",
-    mode => '0755'
-  }
-
-  exec { $checkWebUIFilePath:
-    command   => $checkWebUICmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-  anchor{"hdp-hadoop::smoketest::begin":} -> File[$checkWebUIFilePath] -> Exec[$checkWebUIFilePath] -> anchor{"hdp-hadoop::smoketest::end":}
-}
-
-class hdp-hadoop::zkfc::service_check() inherits hdp-hadoop::params
-{
-  $hdfs_user = $hdp::params::hdfs_user
-  $pid_dir = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${hdfs_user}"
-  $pid_file = "${pid_dir}/hadoop-${hdfs_user}-zkfc.pid"
-
-  # Check whether the pid file exists; if it does, run 'ps <pid>', which
-  # returns 1 if the process is not running
-  $check_zkfc_process_cmd = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-
-  exec { $check_zkfc_process_cmd:
-    command   => $check_zkfc_process_cmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-
-  anchor{"hdp-hadoop::zkfc::service_check::begin":} -> Exec[$check_zkfc_process_cmd] ->
-    anchor{"hdp-hadoop::zkfc::service_check::end":}
-
-}
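Stripped of the Puppet plumbing, the HDFS smoke test above boils down to this command sequence, run as the smoke user (sketch; <unique> stands for the generated id):

    hadoop dfsadmin -safemode get | grep OFF               # retried up to 20 times, 15s apart
    hadoop fs -mkdir /tmp ; hadoop fs -chmod -R 777 /tmp   # skipped when 'hadoop fs -test -e /tmp' succeeds
    hadoop fs -rm /tmp/<unique> ; hadoop fs -put /etc/passwd /tmp/<unique>
    hadoop fs -test -e /tmp/<unique>                       # refreshonly, fired by the create_file step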

+ 0 - 547
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp

@@ -1,547 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#singleton for use with the <||> form so that namenode, datanode, etc. can pass state to hdp-hadoop and still use include
-define hdp-hadoop::common(
-  $service_state
-)
-{
-  class { 'hdp-hadoop':
-    service_state => $service_state
-  }
-  anchor{'hdp-hadoop::common::begin':} -> Class['hdp-hadoop'] -> anchor{'hdp-hadoop::common::end':} 
-}
-
-class hdp-hadoop::initialize()
-{
-  if ($hdp::params::component_exists['hdp-hadoop'] == true) {
-  } else {
-    $hdp::params::component_exists['hdp-hadoop'] = true
-  }
-  hdp-hadoop::common { 'common':}
-  anchor{'hdp-hadoop::initialize::begin':} -> Hdp-hadoop::Common['common'] -> anchor{'hdp-hadoop::initialize::end':}
-
-  # Configs generation
-  debug('##Configs generation for hdp-hadoop')
-
-  if has_key($configuration, 'mapred-queue-acls') {
-    configgenerator::configfile{'mapred-queue-acls': 
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'mapred-queue-acls.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['mapred-queue-acls'],
-      owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/mapred-queue-acls.xml":
-      owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::user_group
-    }
-  }
-  
-  if has_key($configuration, 'hadoop-policy') {
-    configgenerator::configfile{'hadoop-policy': 
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'hadoop-policy.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['hadoop-policy'],
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/hadoop-policy.xml":
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'core-site') {
-      configgenerator::configfile{'core-site': 
-        modulespath => $hdp-hadoop::params::conf_dir,
-        filename => 'core-site.xml',
-        module => 'hdp-hadoop',
-        configuration => $configuration['core-site'],
-        owner => $hdp-hadoop::params::hdfs_user,
-        group => $hdp::params::user_group
-      }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/core-site.xml":
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'mapred-site') {
-    configgenerator::configfile{'mapred-site': 
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'mapred-site.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['mapred-site'],
-      owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/mapred-site.xml":
-      owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  $task_log4j_properties_location = "${hdp-hadoop::params::conf_dir}/task-log4j.properties"
-  
-  file { $task_log4j_properties_location:
-    owner   => $hdp-hadoop::params::mapred_user,
-    group   => $hdp::params::user_group,
-    mode    => 644,
-    ensure  => present,
-    source  => "puppet:///modules/hdp-hadoop/task-log4j.properties",
-    replace => false
-  }
-
-  if has_key($configuration, 'capacity-scheduler') {
-    configgenerator::configfile{'capacity-scheduler':
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'capacity-scheduler.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['capacity-scheduler'],
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group,
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/capacity-scheduler.xml":
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  } 
-
-
-  if has_key($configuration, 'hdfs-site') {
-    configgenerator::configfile{'hdfs-site': 
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'hdfs-site.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['hdfs-site'],
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/hdfs-site.xml":
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'hdfs-exclude-file') {
-    hdp-hadoop::hdfs::generate_exclude_file{'exclude_file':}
-  }
-
-  hdp::package {'ambari-log4j':
-    package_type  => 'ambari-log4j'
-  }
-
-  file { '/usr/lib/hadoop/lib/hadoop-tools.jar':
-    ensure => 'link',
-    target => '/usr/lib/hadoop/hadoop-tools.jar',
-    mode => 755,
-  }
-
-  file { "${hdp-hadoop::params::conf_dir}/configuration.xsl":
-    owner => $hdp-hadoop::params::hdfs_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-hadoop::params::conf_dir}/fair-scheduler.xml":
-    owner => $hdp-hadoop::params::mapred_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-hadoop::params::conf_dir}/masters":
-    owner => $hdp-hadoop::params::hdfs_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-hadoop::params::conf_dir}/ssl-client.xml.example":
-    owner => $hdp-hadoop::params::mapred_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-hadoop::params::conf_dir}/ssl-server.xml.example":
-    owner => $hdp-hadoop::params::mapred_user,
-    group => $hdp::params::user_group
-  }
-
-  if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-    if (hdp_is_empty($configuration) == false and hdp_is_empty($configuration['hdfs-site']) == false) {
-      if (hdp_is_empty($configuration['hdfs-site']['dfs.hosts.exclude']) == false) and
-         (hdp_is_empty($configuration['hdfs-exclude-file']) or
-          has_key($configuration['hdfs-exclude-file'], 'datanodes') == false) {
-        $exlude_file_path = $configuration['hdfs-site']['dfs.hosts.exclude']
-        file { $exlude_file_path :
-        ensure => present,
-        owner => $hdp-hadoop::params::hdfs_user,
-        group => $hdp::params::user_group
-        }
-      }
-      if (hdp_is_empty($hdp::params::slave_hosts) == false and hdp_is_empty($configuration['hdfs-site']['dfs.hosts']) == false) {
-        $include_file_path = $configuration['hdfs-site']['dfs.hosts']
-        $include_hosts_list = $hdp::params::slave_hosts
-        file { $include_file_path :
-        ensure => present,
-        owner => $hdp-hadoop::params::hdfs_user,
-        group => $hdp::params::user_group,
-        content => template('hdp-hadoop/include_hosts_list.erb')
-        }
-      }
-    }
-  }
-
-}
-
-class hdp-hadoop(
-  $service_state
-)
-{
-  include hdp-hadoop::params
-  $hadoop_config_dir = $hdp-hadoop::params::conf_dir
-  $mapred_user = $hdp-hadoop::params::mapred_user  
-  $hdfs_user = $hdp-hadoop::params::hdfs_user
-  $hadoop_tmp_dir = $hdp-hadoop::params::hadoop_tmp_dir
-
-  anchor{'hdp-hadoop::begin':} 
-  anchor{'hdp-hadoop::end':} 
-
-  if ($service_state=='uninstalled') {
-    hdp-hadoop::package { 'hadoop':
-      ensure => 'uninstalled'
-    }
-
-    hdp::directory_recursive_create { $hadoop_config_dir:
-      service_state => $::service_state,
-      force => true
-    }
-
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> -> Hdp::Directory_recursive_create[$hadoop_config_dir] -> Anchor['hdp-hadoop::end']
-  } else {
-    
-    hdp-hadoop::package { 'hadoop':}
-
-    #Replace limits config file
-    hdp::configfile {"${hdp::params::limits_conf_dir}/hdfs.conf":
-      component => 'hadoop',
-      owner => 'root',
-      group => 'root',
-      require => Hdp-hadoop::Package['hadoop'],
-      before  => Anchor['hdp-hadoop::end'],
-      mode => 644    
-    }
-
-    hdp::directory_recursive_create { $hadoop_config_dir:
-      service_state => $::service_state,
-      force => true,
-      owner => 'root',
-      group => 'root'
-    }
- 
-    hdp::user{ 'hdfs_user':
-      user_name => $hdfs_user,
-      groups => [$hdp::params::user_group]
-    }
-    
-    hdp::user { 'mapred_user':
-      user_name => $mapred_user,
-      groups => [$hdp::params::user_group]
-    }
-
-    $logdirprefix = $hdp-hadoop::params::hdfs_log_dir_prefix
-    hdp::directory_recursive_create { $logdirprefix: 
-        owner => 'root'
-    }
-    $piddirprefix = $hdp-hadoop::params::hadoop_pid_dir_prefix
-    hdp::directory_recursive_create { $piddirprefix: 
-        owner => 'root'
-    }
-
-    $dfs_domain_socket_path_dir = hdp_get_directory_from_filepath($hdp-hadoop::params::dfs_domain_socket_path)
-    hdp::directory_recursive_create { $dfs_domain_socket_path_dir:
-      owner => $hdfs_user,
-      group => $hdp::params::user_group,
-      mode  => '0644'
-    }
- 
-    #taskcontroller.cfg properties conditional on security
-    if ($hdp::params::security_enabled == true) {
-      file { "${hdp::params::hadoop_bin}/task-controller":
-        owner   => 'root',
-        group   => $hdp-hadoop::params::mapred_tt_group,
-        mode    => '6050',
-        require => Hdp-hadoop::Package['hadoop'],
-        before  => Anchor['hdp-hadoop::end']
-      }
-      $tc_owner = 'root'
-      $tc_mode = '0644'
-    } else {
-      $tc_owner = $hdfs_user
-      $tc_mode = undef
-    }
-    hdp-hadoop::configfile { 'taskcontroller.cfg' :
-      tag   => 'common',
-      owner => $tc_owner,
-      mode  => $tc_mode
-    }
-
-    $template_files = [ 'hadoop-env.sh', 'commons-logging.properties', 'slaves']
-    hdp-hadoop::configfile { $template_files:
-      tag   => 'common', 
-      owner => $hdfs_user
-    }
-
-    if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-      hdp-hadoop::configfile { 'health_check' :
-        tag   => 'common',
-        owner => $hdfs_user,
-        template_tag => 'v2'
-      }
-    } else {
-      hdp-hadoop::configfile { 'health_check' :
-        tag   => 'common',
-        owner => $hdfs_user
-      }
-    }
-
-    # log4j.properties has to be installed just one time so that
-    # manual changes are not overwritten
-    if ($service_state=='installed_and_configured') {
-      hdp-hadoop::configfile { 'log4j.properties' :
-        tag   => 'common',
-        owner => $hdfs_user,
-      }
-    }
-
-    # updating log4j.properties with data which is sent from server
-    hdp-hadoop::update-log4j-properties { 'log4j.properties': }
-    
-    hdp-hadoop::configfile { 'hadoop-metrics2.properties' : 
-      tag   => 'common', 
-      owner => $hdfs_user,
-    }
-
-    # Copy database drivers for rca enablement
-    $server_db_name = $hdp::params::server_db_name
-    $hadoop_lib_home = $hdp::params::hadoop_lib_home
-    $db_driver_filename = $hdp::params::db_driver_file
-    $oracle_driver_url = $hdp::params::oracle_jdbc_url
-    $mysql_driver_url = $hdp::params::mysql_jdbc_url
-
-    if ($server_db_name == 'oracle' and $oracle_driver_url != "") {
-      $db_driver_dload_cmd = "curl -kf --retry 5 $oracle_driver_url -o ${hadoop_lib_home}/${db_driver_filename}"
-    } elsif ($server_db_name == 'mysql' and $mysql_driver_url != "") {
-      $db_driver_dload_cmd = "curl -kf --retry 5 $mysql_driver_url -o ${hadoop_lib_home}/${db_driver_filename}"
-    }
-    if ($db_driver_dload_cmd != undef) {
-      exec { $db_driver_dload_cmd:
-        command => $db_driver_dload_cmd,
-        unless  => "test -e ${hadoop_lib_home}/${db_driver_filename}",
-        creates => "${hadoop_lib_home}/${db_driver_filename}",
-        path    => ["/bin","/usr/bin/"],
-        require => Hdp-hadoop::Package['hadoop'],
-        before  => Anchor['hdp-hadoop::end']
-      }
-    }
-
-    if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-      hdp::directory_recursive_create { "$hadoop_tmp_dir":
-        service_state => $service_state,
-        force => true,
-        owner => $hdfs_user
-      }
-    }
-
-    if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-      Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> ->  Hdp::User<|title == $hdfs_user or title == $mapred_user|>  ->
-      Hdp::Directory_recursive_create[$hadoop_config_dir] -> Hdp-hadoop::Configfile<|tag == 'common'|> -> Hdp-hadoop::Update-log4j-properties['log4j.properties'] ->
-      Hdp::Directory_recursive_create[$logdirprefix] -> Hdp::Directory_recursive_create[$piddirprefix] -> Hdp::Directory_recursive_create["$hadoop_tmp_dir"] -> Anchor['hdp-hadoop::end']
-    } else {
-      Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> ->  Hdp::User<|title == $hdfs_user or title == $mapred_user|>  ->
-      Hdp::Directory_recursive_create[$hadoop_config_dir] -> Hdp-hadoop::Configfile<|tag == 'common'|> -> Hdp-hadoop::Update-log4j-properties['log4j.properties'] ->
-      Hdp::Directory_recursive_create[$logdirprefix] -> Hdp::Directory_recursive_create[$piddirprefix] -> Anchor['hdp-hadoop::end']
-    }
-
-  }
-}
-
-class hdp-hadoop::enable-ganglia()
-{
-  Hdp-hadoop::Configfile<|title  == 'hadoop-metrics2.properties'|>{template_tag => 'GANGLIA'}
-}
-
-###config file helper
-define hdp-hadoop::configfile(
-  $owner = undef,
-  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir,
-  $mode = undef,
-  $namenode_host = undef,
-  $jtnode_host = undef,
-  $snamenode_host = undef,
-  $template_tag = undef,
-  $size = undef, #TODO: deprecate
-  $sizes = []
-) 
-{
-  #TODO: may need to be fixed 
-  if ($jtnode_host == undef) {
-    $calc_jtnode_host = $namenode_host
-  } else {
-    $calc_jtnode_host = $jtnode_host 
-  }
- 
-  #only set 32 if there is a 32 bit component and no 64 bit components
-  if (64 in $sizes) {
-    $common_size = 64
-  } elsif (32 in $sizes) {
-    $common_size = 32
-  } else {
-    $common_size = 64
-  }
-  
-  hdp::configfile { "${hadoop_conf_dir}/${name}":
-    component      => 'hadoop',
-    owner          => $owner,
-    mode           => $mode,
-    namenode_host  => $namenode_host,
-    snamenode_host => $snamenode_host,
-    jtnode_host    => $calc_jtnode_host,
-    template_tag   => $template_tag,
-    size           => $common_size
-  }
-}
-
-#####
-define hdp-hadoop::exec-hadoop(
-  $command,
-  $unless = undef,
-  $refreshonly = undef,
-  $echo_yes = false,
-  $kinit_override = false,
-  $tries = 1,
-  $timeout = 900,
-  $try_sleep = undef,
-  $user = undef,
-  $logoutput = undef,
-  $onlyif = undef,
-  $path = undef
-)
-{
-  include hdp-hadoop::params
-  $security_enabled = $hdp::params::security_enabled
-  $conf_dir = $hdp-hadoop::params::conf_dir
-  $hdfs_user = $hdp-hadoop::params::hdfs_user
-  $hbase_user = $hdp-hadoop::params::hbase_user
-
-  if ($user == undef) {
-    $run_user = $hdfs_user
-  } else {
-    $run_user = $user
-  }
-
-  if (($security_enabled == true) and ($kinit_override == false)) {
-    if ($run_user in [$hdfs_user,'root']) {
-      $keytab = $hdp::params::hdfs_user_keytab
-      $principal = $hdfs_user
-    } elsif ($run_user in [$hbase_user]) {
-      $keytab = $hdp::params::hbase_user_keytab
-      $principal = $hbase_user
-    } else {
-      $keytab = $hdp::params::smokeuser_keytab
-      $principal = $hdp::params::smokeuser
-    }
-    $kinit_if_needed = "su - ${run_user} -c '${hdp::params::kinit_path_local} -kt ${keytab} ${principal}'"
-  } else {
-    $kinit_if_needed = ""
-  }
-  
-  if ($path == undef) {
-    if ($echo_yes == true) {
-      $cmd = "yes Y | hadoop --config ${conf_dir} ${command}"
-    } else {
-      $cmd = "hadoop --config ${conf_dir} ${command}"
-    }
-  } else {
-    $cmd = "${path} ${command}"
-  }
-  
-  if ($kinit_if_needed != "") {
-    exec { "kinit_before_${cmd}":
-      command => $kinit_if_needed,
-      path => ['/bin'],
-      before => Hdp::Exec[$cmd]
-    }
-  }
-
-  hdp::exec { $cmd:
-    command     => $cmd,
-    user        => $run_user,
-    unless      => $unless,
-    refreshonly => $refreshonly,
-    tries       => $tries,
-    timeout     => $timeout,
-    try_sleep   => $try_sleep,
-    logoutput   => $logoutput,
-    onlyif      => $onlyif,
-  }
-}
-
-#####
-define hdp-hadoop::update-log4j-properties(
-  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir
-)
-{
-  $properties = [
-    { name => 'ambari.jobhistory.database', value => $hdp-hadoop::params::ambari_db_rca_url },
-    { name => 'ambari.jobhistory.driver', value => $hdp-hadoop::params::ambari_db_rca_driver },
-    { name => 'ambari.jobhistory.user', value => $hdp-hadoop::params::ambari_db_rca_username },
-    { name => 'ambari.jobhistory.password', value => $hdp-hadoop::params::ambari_db_rca_password },
-    { name => 'ambari.jobhistory.logger', value => 'DEBUG,JHA' },
-
-    { name => 'log4j.appender.JHA', value => 'org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender' },
-    { name => 'log4j.appender.JHA.database', value => '${ambari.jobhistory.database}' },
-    { name => 'log4j.appender.JHA.driver', value => '${ambari.jobhistory.driver}' },
-    { name => 'log4j.appender.JHA.user', value => '${ambari.jobhistory.user}' },
-    { name => 'log4j.appender.JHA.password', value => '${ambari.jobhistory.password}' },
-
-    { name => 'log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger', value => '${ambari.jobhistory.logger}' },
-    { name => 'log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger', value => 'true' }
-  ]
-  hdp-hadoop::update-log4j-property { $properties :
-    log4j_file      => $name,
-    hadoop_conf_dir => $hadoop_conf_dir
-  }
-}
-
-#####
-define hdp-hadoop::update-log4j-property(
-  $log4j_file,
-  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir
-)
-{
-  hdp::exec{ "sed -i 's~\\(${hdp-hadoop::params::rca_disabled_prefix}\\)\\?${name[name]}=.*~${hdp-hadoop::params::rca_prefix}${name[name]}=${name[value]}~' ${hadoop_conf_dir}/${log4j_file}":
-    command => "sed -i 's~\\(${hdp-hadoop::params::rca_disabled_prefix}\\)\\?${name[name]}=.*~${hdp-hadoop::params::rca_prefix}${name[name]}=${name[value]}~' ${hadoop_conf_dir}/${log4j_file}"
-  }
-}
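For a single property, the sed generated by update-log4j-property reduces to the following (shown with rca_enabled true, so $rca_prefix is empty; the conf path and values are illustrative):

    sed -i 's~\(###\)\?ambari.jobhistory.logger=.*~ambari.jobhistory.logger=DEBUG,JHA~' \
      /etc/hadoop/conf/log4j.properties
    # with rca_enabled false, the replacement is prefixed with '###' so the property stays commented out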

+ 0 - 96
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp

@@ -1,96 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::jobtracker(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::jobtracker'] = true
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
-    $mapred_user = $hdp-hadoop::params::mapred_user
-    $mapred_local_dir = $hdp-hadoop::params::mapred_local_dir 
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'jobtracker_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/jt.service.keytab",
-        keytabfile => 'jt.service.keytab',
-        owner => $hdp-hadoop::params::mapred_user
-      }
-    }
-     
-    hdp-hadoop::jobtracker::create_local_dirs { $mapred_local_dir: 
-      service_state => $service_state
-    }
-
-    #TODO: cleanup 
-    Hdp-Hadoop::Configfile<||>{jtnode_host => $hdp::params::host_address}
-
-    #TODO: do we keep precondition here?
-    if ($service_state == 'running' and $hdp-hadoop::params::use_preconditions == true) {
-      class { 'hdp-hadoop::hdfs::service_check':
-        before => Hdp-hadoop::Service['jobtracker'],
-        require => Class['hdp-hadoop']
-      }
-    }
-
-    hdp-hadoop::service{ 'jobtracker':
-      ensure       => $service_state,
-      user         => $mapred_user
-    }
-  
-    hdp-hadoop::service{ 'historyserver':
-      ensure         => $service_state,
-      user           => $mapred_user,
-      create_pid_dir => false,
-      create_log_dir => false
-    }
-
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Service['jobtracker'] -> Hdp-hadoop::Service['historyserver'] 
-    -> Anchor['hdp-hadoop::end']
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Jobtracker::Create_local_dirs<||> -> Hdp-hadoop::Service['jobtracker'] 
-    -> Anchor['hdp-hadoop::end']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::jobtracker::create_local_dirs($service_state)
-{
-    $dirs = hdp_array_from_comma_list($name)
-    hdp::directory_recursive_create { $dirs :
-      owner => $hdp-hadoop::params::mapred_user,
-      mode => '0755',
-      service_state => $service_state,
-      force => true
-    }
-}
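Because create_local_dirs splits its resource title on commas, a single declaration fans out into several recursive directory creations, e.g. (paths hypothetical):

    hdp-hadoop::jobtracker::create_local_dirs { '/grid/0/mapred/local,/grid/1/mapred/local':
      service_state => 'running'
    }
    # => hdp::directory_recursive_create { ['/grid/0/mapred/local', '/grid/1/mapred/local']: ... }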

+ 0 - 29
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp

@@ -1,29 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::jobtracker::service_check()
-{
-  hdp-hadoop::exec-hadoop { 'jobtracker::service_check':
-    command   => 'job -list',
-    tries     => 3,
-    try_sleep => 5,
-    user => $hdp::params::smokeuser
-  }
-}

+ 0 - 60
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/journalnode.pp

@@ -1,60 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::journalnode(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params 
-{
-
-  $hdp::params::service_exists['hdp-hadoop::journalnode'] = true
-  
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-  
-  $jn_edits_dir = $hdp-hadoop::params::jn_edits_dir
-  $hdfs_user = $hdp-hadoop::params::hdfs_user
-  
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-    
-    hdp::directory_recursive_create{ $jn_edits_dir:
-        service_state => $service_state,
-        force => true,
-        owner => $hdfs_user
-      }
-      
-    hdp-hadoop::service{ 'journalnode':
-      ensure         => $service_state,
-      user           => $hdp-hadoop::params::hdfs_user,
-      create_pid_dir => true,
-      create_log_dir => true
-    }
-    
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$jn_edits_dir] -> Hdp-hadoop::Service['journalnode'] -> Anchor['hdp-hadoop::end'] 
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

+ 0 - 75
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp

@@ -1,75 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::mapred::service_check() 
-{
-  $smoke_test_user = $hdp::params::smokeuser
-  $jar_location = $hdp::params::hadoop_jar_location
-  $input_file = 'mapredsmokeinput'
-  $output_file = "mapredsmokeoutput"
-
-  $cleanup_cmd = "dfs -rmr ${output_file} ${input_file}"
-  #cleanup put below to handle retries; if retrying there will be a stale file that needs cleanup; exit code is a function of the second command
-  $create_file_cmd = "$cleanup_cmd ; hadoop dfs -put /etc/passwd ${input_file} " #TODO: inconsistent that the second command needs 'hadoop'
-  $test_cmd = "fs -test -e ${output_file}" 
-  $run_wordcount_job = "jar ${jar_location}/hadoop-examples.jar  wordcount ${input_file} ${output_file}"
-  
-  anchor { 'hdp-hadoop::mapred::service_check::begin':}
-
-  hdp-hadoop::exec-hadoop { 'mapred::service_check::create_file':
-    command   => $create_file_cmd,
-    tries     => 1,
-    try_sleep => 5,
-    require   => Anchor['hdp-hadoop::mapred::service_check::begin'],
-  #  notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'],
-    user      => $smoke_test_user
-  }
-
-  hdp-hadoop::exec-hadoop { 'mapred::service_check::run_wordcount':
-    command   => $run_wordcount_job,
-    tries     => 1,
-    try_sleep => 5,
-    require   => Hdp-hadoop::Exec-hadoop['mapred::service_check::create_file'],
-    notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::test'],
-    user      => $smoke_test_user,
-    logoutput => "true"
-  }
-
-#  exec { 'runjob':
-#    command   => "hadoop jar ${jar_location}/hadoop-examples.jar  wordcount ${input_file} ${output_file}",
-#    tries     => 1,
-#    try_sleep => 5,
-#    require   => Hdp-hadoop::Exec-hadoop['mapred::service_check::create_file'],
-#    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-#    notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::test'],
-#    logoutput => "true",
-#    user      => $smoke_test_user
-#  }
-
-  hdp-hadoop::exec-hadoop { 'mapred::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    require     => Hdp-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'],
-    before      => Anchor['hdp-hadoop::mapred::service_check::end'], #TODO: remove after testing
-    user        => $smoke_test_user
-  }
-  
-  anchor{ 'hdp-hadoop::mapred::service_check::end':}
-}
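As plain commands, the MapReduce smoke test above is (run as the smoke user; the jar path comes from hadoop_jar_location and is illustrative here):

    hadoop dfs -rmr mapredsmokeoutput mapredsmokeinput
    hadoop dfs -put /etc/passwd mapredsmokeinput
    hadoop jar /usr/lib/hadoop/hadoop-examples.jar wordcount mapredsmokeinput mapredsmokeoutput
    hadoop fs -test -e mapredsmokeoutput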

+ 0 - 285
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp

@@ -1,285 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::namenode(
-  $service_state = $hdp::params::cluster_service_state,
-  $slave_hosts = [],
-  $format = true,
-  $opts = {}
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::namenode'] = true
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-    $dfs_name_dir = $hdp-hadoop::params::dfs_name_dir
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and 
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'namenode_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/nn.service.keytab",
-        keytabfile => 'nn.service.keytab',
-        owner => $hdp-hadoop::params::hdfs_user
-      }
-      hdp::download_keytab { 'namenode_hdfs_headless_keytab' :   
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/hdfs.headless.keytab",
-        keytabfile => 'hdfs.headless.keytab', 
-        owner => $hdp-hadoop::params::hdfs_user, 
-        hostnameInPrincipals => 'no'
-      }
-      hdp::download_keytab { 'namenode_spnego_keytab' :   
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/spnego.service.keytab",
-        keytabfile => 'spnego.service.keytab', 
-        owner => $hdp-hadoop::params::hdfs_user, 
-        mode => '0440',
-        group => $hdp::params::user_group
-      }
-    }
-
-    hdp-hadoop::namenode::create_name_dirs { $dfs_name_dir: 
-      service_state => $service_state
-    }
-   
-    Hdp-Hadoop::Configfile<||>{namenode_host => $hdp::params::host_address}
-    Hdp::Configfile<||>{namenode_host => $hdp::params::host_address} #for components other than hadoop (e.g., hbase) 
-  
-    if ($service_state == 'running' and $format == true) {
-      class {'hdp-hadoop::namenode::format' : }
-    }
-
-    hdp-hadoop::service{ 'namenode':
-      ensure       => $service_state,
-      user         => $hdp-hadoop::params::hdfs_user,
-      initial_wait => hdp_option_value($opts,'wait')
-    }
-
-    hdp-hadoop::namenode::create_app_directories { 'create_app_directories' :
-      service_state => $service_state
-    }
-
-    hdp-hadoop::namenode::create_user_directories { 'create_user_directories' :
-      service_state => $service_state
-    }
-
-    Anchor['hdp-hadoop::begin'] ->
-    Hdp-hadoop::Namenode::Create_name_dirs<||> ->
-    Hdp-hadoop::Service['namenode'] ->
-    Hdp-hadoop::Namenode::Create_app_directories<||> ->
-    Hdp-hadoop::Namenode::Create_user_directories<||> ->
-    Anchor['hdp-hadoop::end']
-
-    if ($service_state == 'running' and $format == true) {
-      Anchor['hdp-hadoop::begin'] ->
-      Hdp-hadoop::Namenode::Create_name_dirs<||> ->
-      Class['hdp-hadoop::namenode::format'] ->
-      Hdp-hadoop::Service['namenode'] ->
-      Anchor['hdp-hadoop::end']
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::namenode::create_name_dirs($service_state)
-{
-  $dirs = hdp_array_from_comma_list($name)
-  hdp::directory_recursive_create { $dirs :
-    owner => $hdp-hadoop::params::hdfs_user,
-    mode => '0755',
-    service_state => $service_state,
-    force => true
-  }
-}
-
-define hdp-hadoop::namenode::create_app_directories($service_state)
-{
-
-  if ($service_state == 'running') {
-   
-    hdp-hadoop::hdfs::directory{ "/tmp" :
-      service_state => $service_state,
-      owner => $hdp-hadoop::params::hdfs_user,
-      mode => '777'
-    }
-
-    hdp-hadoop::hdfs::directory{ '/mapred' :
-      service_state => $service_state,
-      owner         => $hdp-hadoop::params::mapred_user
-    }
-
-    hdp-hadoop::hdfs::directory{ '/mapred/system' :
-      service_state => $service_state,
-      owner         => $hdp-hadoop::params::mapred_user
-    }
-
-    Hdp-hadoop::Hdfs::Directory['/mapred'] -> Hdp-hadoop::Hdfs::Directory['/mapred/system']
-
-    if ($hdp::params::hbase_master_hosts != "") {
-
-      hdp-hadoop::hdfs::directory { $hdp-hadoop::params::hdfs_root_dir:
-        owner         => $hdp::params::hbase_user,
-        service_state => $service_state
-      }
-
-      $hbase_staging_dir = $hdp::params::hbase_staging_dir
-      hdp-hadoop::hdfs::directory { $hbase_staging_dir:
-        owner         => $hdp::params::hbase_user,
-        service_state => $service_state,
-        mode             => '711'
-      }
-    }
-
-    if ($hdp::params::hive_server_host != "") {
-      $hive_user = $hdp::params::hive_user
-      $hive_apps_whs_dir = $hdp::params::hive_apps_whs_dir
-
-      hdp-hadoop::hdfs::directory{ $hive_apps_whs_dir:
-        service_state   => $service_state,
-        owner            => $hive_user,
-        mode             => '777',
-        recursive_chmod  => true
-      }
-    }
-
-    if ($hdp::params::webhcat_server_host != "") {
-      $webhcat_user = $hdp::params::webhcat_user
-      $webhcat_apps_dir = hdp_get_directory_from_filepath(hdp_get_dir_from_url(hdp_default("webhcat-site/templeton.streaming.jar",""), "/apps/webhcat"))
-
-      hdp-hadoop::hdfs::directory{ $webhcat_apps_dir:
-        service_state => $service_state,
-        owner => $webhcat_user,
-        mode  => '755',
-        recursive_chmod => true
-      }
-    }
-
-    if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-      if ($hdp::params::nm_hosts != "") {
-        if ($hdp::params::yarn_log_aggregation_enabled == "true") {
-          $yarn_user = $hdp::params::yarn_user
-          $yarn_nm_app_log_dir = $hdp::params::yarn_nm_app_log_dir
-
-          hdp-hadoop::hdfs::directory{ $yarn_nm_app_log_dir:
-            service_state => $service_state,
-            owner => $yarn_user,
-            group => $hdp::params::user_group,
-            mode  => '1777',
-            recursive_chmod => true
-          }
-        }
-      }
-
-
-      if ($hdp::params::hs_host != "") {
-        $mapred_user = $hdp::params::mapred_user
-        $mapreduce_jobhistory_intermediate_done_dir = $hdp::params::mapreduce_jobhistory_intermediate_done_dir
-        $group = $hdp::params::user_group
-        $mapreduce_jobhistory_done_dir = $hdp::params::mapreduce_jobhistory_done_dir
-
-        hdp-hadoop::hdfs::directory{ $mapreduce_jobhistory_intermediate_done_dir:
-          service_state => $service_state,
-          owner => $mapred_user,
-          group => $group,
-          mode  => '1777'
-        }
-
-        hdp-hadoop::hdfs::directory{ $mapreduce_jobhistory_done_dir:
-          service_state => $service_state,
-          owner => $mapred_user,
-          group => $group,
-          mode  => '1777'
-        }
-      }
-    }
-  }
-}
-
-
-define hdp-hadoop::namenode::create_user_directories($service_state)
-{
-  if ($service_state == 'running') {
-    $smoke_hdfs_user_dir = $hdp::params::smoke_hdfs_user_dir
-
-    $smoke_user_dir_item="$smoke_hdfs_user_dir,"
-
-    if ($hdp::params::hive_server_host != "") {
-      $hive_hdfs_user_dir = $hdp::params::hive_hdfs_user_dir
-      $hive_dir_item="$hive_hdfs_user_dir,"
-    } else {
-      $hive_dir_item=""
-    }
-
-    if ($hdp::params::oozie_server != "") {
-      $oozie_hdfs_user_dir = $hdp::params::oozie_hdfs_user_dir
-      $oozie_dir_item="$oozie_hdfs_user_dir,"
-    } else {
-      $oozie_dir_item=""
-    }
-    
-    if ($hdp::params::webhcat_server_host != "") {
-      $hcat_hdfs_user_dir = $hdp::params::hcat_hdfs_user_dir
-      $webhcat_hdfs_user_dir = $hdp::params::webhcat_hdfs_user_dir
-      $webhcat_dir_item="$webhcat_hdfs_user_dir,"
-      if ($hcat_hdfs_user_dir != $webhcat_hdfs_user_dir) {
-        $hcat_dir_item="$hcat_hdfs_user_dir,"
-      } else {
-        $hcat_dir_item=""
-      }
-    } else {
-      $hcat_dir_item=""
-      $webhcat_dir_item=""
-    }
-
-    $users_dir_list_comm_sep = "$smoke_user_dir_item $hive_dir_item $oozie_dir_item $hcat_dir_item $webhcat_dir_item"
-
-    #Get unique users directories set
-    $users_dirs_set = hdp_set_from_comma_list($users_dir_list_comm_sep)
-
-    hdp-hadoop::namenode::create_user_directory{ $users_dirs_set:
-      service_state => $service_state
-    }
-  }
-  
-}
-
-define hdp-hadoop::namenode::create_user_directory($service_state)
-{
-  
-  $owner = hdp_hadoop_get_owner($name)
-  $mode = hdp_hadoop_get_mode($name)
-  debug("## Creating user directory: $name, owner: $owner, mode: $mode")
-  hdp-hadoop::hdfs::directory{ $name:
-   service_state   => $service_state,
-   mode            => $mode,
-   owner           => $owner,
-   recursive_chmod => true
-  }
-}
-

+ 0 - 61
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp

@@ -1,61 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::namenode::format(
-  $force = false
-)
-{
-  $mark_dir = $hdp-hadoop::params::namenode_formatted_mark_dir
-  $dfs_name_dir = $hdp-hadoop::params::dfs_name_dir
-  $hdfs_user = $hdp::params::hdfs_user
-  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir
-
-  # Avoid formatting standby namenode in a HA cluster
-  if ($hdp::params::dfs_ha_enabled == false) {
-    if ($force == true) {
-        hdp-hadoop::exec-hadoop { 'namenode -format' :
-        command => 'namenode -format',
-        kinit_override => true,
-        notify  => Hdp::Exec['set namenode mark']
-      }
-    } else {
-
-      file { '/tmp/checkForFormat.sh':
-        ensure => present,
-        source => "puppet:///modules/hdp-hadoop/checkForFormat.sh",
-        mode => '0755'
-      }
-
-      exec { '/tmp/checkForFormat.sh':
-        command   => "sh /tmp/checkForFormat.sh ${hdfs_user} ${hadoop_conf_dir} ${mark_dir} ${dfs_name_dir} ",
-        unless   => "test -d ${mark_dir}",
-        require   => File['/tmp/checkForFormat.sh'],
-        path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-        logoutput => "true",
-        notify   => Hdp::Exec['set namenode mark']
-      }
-    }
-
-    hdp::exec { 'set namenode mark' :
-      command     => "mkdir -p ${mark_dir}",
-      refreshonly => true
-    }
-  }
-}
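The non-forced path guards formatting behind both checkForFormat.sh and the mark directory (namenode_formatted_mark_dir, /var/run/hadoop/hdfs/namenode/formatted/ by default). A rough shell equivalent, ignoring whatever extra checks the script itself performs (<dfs_name_dir> left as a placeholder):

    if [ ! -d /var/run/hadoop/hdfs/namenode/formatted/ ]; then
      sh /tmp/checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted/ <dfs_name_dir> \
        && mkdir -p /var/run/hadoop/hdfs/namenode/formatted/
    fi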

+ 0 - 28
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp

@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::namenode::service_check()
-{
-  hdp-hadoop::exec-hadoop { 'namenode::service_check':
-    command   => 'dfs -ls /',
-    tries     => 3,
-    try_sleep => 5
-  }
-}

+ 0 - 44
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp

@@ -1,44 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#singleton, but implemented as a define so collections can be used to override params
-define hdp-hadoop::package(
-  $ensure = 'present',
-  $include_32_bit = false,
-  $include_64_bit = false
-)
-{
-  #just use 32 if it is specifically requested and there are no 64-bit requests
-  if ($include_32_bit == true) and ($include_64_bit != true) {
-    $size = 32
-  } else  {
-    $size = 64
-  }
-  $package = "hadoop ${size}"
-  $lzo_enabled = $hdp::params::lzo_enabled
-
-  hdp::package{ $package:
-    ensure       => $ensure,
-    package_type => 'hadoop',
-    size         => $size,
-    lzo_needed   => $lzo_enabled
-  }
-  anchor{ 'hdp-hadoop::package::helper::begin': } -> Hdp::Package[$package] -> anchor{ 'hdp-hadoop::package::helper::end': }
-}
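Callers never pass the size directly; they flip it with a collector override, as the role classes above do (usage sketch):

    hdp-hadoop::package { 'hadoop': }
    Hdp-hadoop::Package<||>{ include_64_bit => true }   # resolves to the 'hadoop 64' hdp::package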

+ 0 - 222
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp

@@ -1,222 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::params(
-) inherits hdp::params 
-{
-
-  ##TODO: for testing in masterless mode
-  $use_preconditions = false
-  ####  
-  $conf_dir = $hdp::params::hadoop_conf_dir 
-
-  ####hbase
-  $hdfs_root_dir = $hdp::params::hbase_hdfs_root_dir
-
-  ####### users
-
-  $mapred_user = $hdp::params::mapred_user
-  $hdfs_user = $hdp::params::hdfs_user
-  
-  ##### security related
-  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
- 
-  if ($hdp::params::security_enabled == true) {
-    $enable_security_authorization = true
-    $security_type = "kerberos"
-    $task_controller = "org.apache.hadoop.mapred.LinuxTaskController"
-    $dfs_datanode_address = 1019
-    $dfs_datanode_http_address = 1022
-  } else {
-    $enable_security_authorization = false
-    $security_type = "simple"
-    $task_controller = "org.apache.hadoop.mapred.DefaultTaskController"
-    $dfs_datanode_address = hdp_default("dfs_datanode_address","50010")
-    $dfs_datanode_http_address = hdp_default("dfs_datanode_http_address","50075")
-  }
-
-  ### hadoop-env
-  
-  $dtnode_heapsize = hdp_default("dtnode_heapsize","1024m")
-  $ttnode_heapsize = hdp_default("ttnode_heapsize","1024m")
-
-  $hadoop_heapsize = hdp_default("hadoop_heapsize","1024")
-
-  $hdfs_log_dir_prefix = hdp_default("hdfs_log_dir_prefix","/var/log/hadoop")
-
-  $hadoop_pid_dir_prefix = hdp_default("hadoop_pid_dir_prefix","/var/run/hadoop")
-  $run_dir = $hadoop_pid_dir_prefix
-
-  $namenode_formatted_mark_dir = "${run_dir}/hdfs/namenode/formatted/"
-
-  $jtnode_heapsize = hdp_default("jtnode_heapsize","1024m")
-
-  $jtnode_opt_maxnewsize = hdp_default("jtnode_opt_maxnewsize","200m")
-
-  $jtnode_opt_newsize = hdp_default("jtnode_opt_newsize","200m")
-
-  $namenode_heapsize = hdp_default("namenode_heapsize","1024m")
-
-  $namenode_opt_maxnewsize = hdp_default("namenode_opt_maxnewsize","640m")
-
-  $namenode_opt_newsize = hdp_default("namenode_opt_newsize","640m")
-  
-  $hadoop_libexec_dir = hdp_default("hadoop_libexec_dir","/usr/lib/hadoop/libexec")
-  
-  $mapreduce_libs_path = hdp_default("mapreduce_libs_path","/usr/lib/hadoop-mapreduce/*")
-  
-  $mapred_log_dir_prefix = hdp_default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-  $mapred_pid_dir_prefix = hdp_default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-
-  # Cannot create new dir in directory.pp, reusing existing path
-  $namenode_dirs_created_stub_dir = "${hdfs_log_dir_prefix}/${hdp::params::hdfs_user}"
-  $namenode_dirs_stub_filename = "namenode_dirs_created"
-
-  ### JSVC_HOME path is correct for AMD64 only, but can be changed through API
-  if ($hdp::params::hdp_os_type == "suse") {
-    $jsvc_path = hdp_default("jsvc_path","/usr/lib/bigtop-utils")
-  } else {
-    $jsvc_path = hdp_default("jsvc_path","/usr/libexec/bigtop-utils")
-  }
-
-  ### compression related
-  if (($hdp::params::lzo_enabled == true) and ($hdp::params::snappy_enabled == true)) {
-    $mapred_compress_map_output = true
-    $compression_codecs =  "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec"
-    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
-  } elsif ($hdp::params::snappy_enabled == true) {
-    $mapred_compress_map_output = true
-    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec" 
-    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
-  } elsif ($hdp::params::lzo_enabled == true) {
-    $mapred_compress_map_output = true
-    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec"
-    $mapred_map_output_compression_codec = "com.hadoop.compression.lzo.LzoCodec"
-  } else { 
-    $mapred_compress_map_output = false
-    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec"
-    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.DefaultCodec"
-  }
-
-  ### core-site
-  if (hdp_get_major_stack_version($stack_version) >= 2) {
-    $fs_checkpoint_dir = hdp_default("hdfs-site/dfs.namenode.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
-  } else {
-    $fs_checkpoint_dir = hdp_default("core-site/fs.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
-  }
-
-  $proxyuser_group = hdp_default("core-site/proxyuser.group","users")
-  
-  $hadoop_tmp_dir = hdp_default("core-site/hadoop.tmp.dir","/tmp/hadoop-$hdfs_user")
-  
-  $hadoop_ssl_enabled = hdp_default("core-site/hadoop.ssl.enabled","false")
-
-  ### hdfs-site
-  $datanode_du_reserved = hdp_default("hdfs-site/datanode.du.reserved",1073741824)
-
-  $dfs_block_local_path_access_user = hdp_default("hdfs-site/dfs.block.local.path.access.user","hbase")
-
-  $dfs_data_dir = $hdp::params::dfs_data_dir
-
-  $dfs_datanode_data_dir_perm = hdp_default("hdfs-site/dfs.datanode.data.dir.perm",750)
-
-  $dfs_datanode_failed_volume_tolerated = hdp_default("hdfs-site/dfs.datanode.failed.volume.tolerated",0)
-
-  $dfs_exclude = hdp_default("hdfs-site/dfs.exclude","dfs.exclude")
-
-  $dfs_include = hdp_default("hdfs-site/dfs.include","dfs.include")
-  
-  if (hdp_get_major_stack_version($stack_version) >= 2) {
-    $dfs_name_dir = hdp_default("hdfs-site/dfs.namenode.name.dir","/tmp/hadoop-hdfs/dfs/name")
-  } else {
-    $dfs_name_dir = hdp_default("hdfs-site/dfs.name.dir","/tmp/hadoop-hdfs/dfs/name")
-  }
-  
-  $dfs_replication = hdp_default("hdfs-site/dfs.replication",3)
-
-  $dfs_support_append = hdp_default("hdfs-site/dfs.support.append",true)
-
-  $dfs_webhdfs_enabled = hdp_default("hdfs-site/dfs.webhdfs.enabled",false)
-  
-  $jn_edits_dir = hdp_default("hdfs-site/dfs.journalnode.edits.dir", "/grid/0/hdfs/journal")
-  
-  $dfs_domain_socket_path = hdp_default("hdfs-site/dfs.domain.socket.path","/var/lib/hadoop-hdfs/dn_socket")
-
- ######### mapred #######
-   ### mapred-site
-
-  $mapred_system_dir = '/mapred/system'
-
-  $mapred_child_java_opts_sz = hdp_default("mapred-site/mapred.child.java.opts.sz","-Xmx768m")
-
-  $mapred_cluster_map_mem_mb = hdp_default("mapred-site/mapred.cluster.map.mem.mb","-1")
-
-  $mapred_cluster_max_map_mem_mb = hdp_default("mapred-site/mapred.cluster.max.map.mem.mb","-1")
-
-  $mapred_cluster_max_red_mem_mb = hdp_default("mapred-site/mapred.cluster.max.red.mem.mb","-1")
-
-  $mapred_cluster_red_mem_mb = hdp_default("mapred-site/mapred.cluster.red.mem.mb","-1")
-
-  $mapred_job_map_mem_mb = hdp_default("mapred-site/mapred.job.map.mem.mb","-1")
-
-  $mapred_job_red_mem_mb = hdp_default("mapred-site/mapred.job.red.mem.mb","-1")
-
-  $mapred_jobstatus_dir = hdp_default("mapred-site/mapred.jobstatus.dir","file:////mapred/jobstatus")
-
-  if (hdp_get_major_stack_version($stack_version) >= 2) {
-    $mapred_local_dir = hdp_default("mapred-site/mapreduce.cluster.local.dir","/tmp/hadoop-mapred/mapred/local")
-  } else {
-    $mapred_local_dir = hdp_default("mapred-site/mapred.local.dir","/tmp/hadoop-mapred/mapred/local")
-  }
-
-  $mapred_tt_group = hdp_default("mapred-site/mapreduce.tasktracker.group", "hadoop")
-   
-  $mapreduce_userlog_retainhours = hdp_default("mapred-site/mapreduce.userlog.retainhours",24)
-
-  $maxtasks_per_job = hdp_default("mapred-site/maxtasks.per.job","-1")
-
-  $scheduler_name = hdp_default("mapred-site/scheduler.name","org.apache.hadoop.mapred.CapacityTaskScheduler")
-
-  #### health_check
-
-  $security_enabled = $hdp::params::security_enabled
-
-  $task_bin_exe = hdp_default("task_bin_exe")
-
-  $rca_enabled = hdp_default("rca_enabled", false)
-  $rca_disabled_prefix = "###"
-  if ($rca_enabled == true) {
-    $rca_prefix = ""
-  } else {
-    $rca_prefix = $rca_disabled_prefix
-  }
-  # $ambari_db_server_host = hdp_default("ambari_db_server_host", "localhost")
-  $ambari_db_rca_url = hdp_default("ambari_db_rca_url", "jdbc:postgresql://localhost/ambarirca")
-  $ambari_db_rca_driver = hdp_default("ambari_db_rca_driver", "org.postgresql.Driver")
-  $ambari_db_rca_username = hdp_default("ambari_db_rca_username", "mapred")
-  $ambari_db_rca_password = hdp_default("ambari_db_rca_password", "mapred")
-
-  if ($hdp::params::dfs_ha_enabled == true) {
-    $nameservice = $hdp::params::dfs_ha_nameservices
-    $namenode_id = hdp_hadoop_get_namenode_id("dfs.namenode.rpc-address.${nameservice}", "hdfs-site")
-  }
-
-}
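Note: the checkpoint-directory lookup above switches config keys by stack major version: dfs.namenode.checkpoint.dir (hdfs-site) on HDP 2.x versus fs.checkpoint.dir (core-site) on earlier stacks. A minimal sketch for confirming which key is in effect on a node (conf path assumed; hdfs getconf is only available on Hadoop 2 installs):

    # HDP 2.x
    hdfs getconf -confKey dfs.namenode.checkpoint.dir
    # HDP 1.x: look the legacy key up in core-site.xml directly
    grep -A1 'fs.checkpoint.dir' /etc/hadoop/conf/core-site.xml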

+ 0 - 132
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp

@@ -1,132 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp-hadoop::service(
-  $ensure = 'running',
-  $user,
-  $initial_wait = undef,
-  $create_pid_dir = true,
-  $create_log_dir = true
-)
-{
-
-  $security_enabled = $hdp::params::security_enabled
-
-  #NOTE: this does not work if the namenode and datanode are on the same host
-  $pid_dir = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${user}"
-  
-  $hadoop_libexec_dir = $hdp-hadoop::params::hadoop_libexec_dir
-  
-  if (($security_enabled == true) and ($name == 'datanode')) {
-    $run_as_root = true
-  } else {       
-    $run_as_root = false
-  }
-
-  if (($security_enabled == true) and ($name == 'datanode')) {
-    $hdfs_user = $hdp::params::hdfs_user
-    $pid_file = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${hdfs_user}/hadoop-${hdfs_user}-${name}.pid"
-  } else {
-    $pid_file = "${pid_dir}/hadoop-${user}-${name}.pid"
-  } 
-
-  $log_dir = "${hdp-hadoop::params::hdfs_log_dir_prefix}/${user}"
-  $hadoop_daemon = "export HADOOP_LIBEXEC_DIR=${hadoop_libexec_dir} && ${hdp::params::hadoop_bin}/hadoop-daemon.sh"
-   
-  $cmd = "${hadoop_daemon} --config ${hdp-hadoop::params::conf_dir}"
-  if ($ensure == 'running') {
-    if ($run_as_root == true) {
-      $daemon_cmd = "su - root -c  '${cmd} start ${name}'"
-    } else {
-      $daemon_cmd = "su - ${user} -c  '${cmd} start ${name}'"
-    }
-    # If the pid file exists, run 'ps <pid>' against it; ps exits non-zero
-    # when the recorded process is no longer running
-    $service_is_up = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-  } elsif ($ensure == 'stopped') {
-    if ($run_as_root == true) {
-      $daemon_cmd = "su - root -c  '${cmd} stop ${name}' && rm -f ${pid_file}"
-    } else {
-      $daemon_cmd = "su - ${user} -c  '${cmd} stop ${name}' && rm -f ${pid_file}"
-    }
-    $service_is_up = undef
-  } else {
-    $daemon_cmd = undef
-  }
- 
-  if ($create_pid_dir == true) {
-    hdp::directory_recursive_create { $pid_dir: 
-      owner       => $user,
-      context_tag => 'hadoop_service',
-      service_state => $::service_state,
-      force => true
-    }
-  }
-  
-  if ($create_log_dir == true) {
-    hdp::directory_recursive_create { $log_dir: 
-      owner       => $user,
-      context_tag => 'hadoop_service',
-      service_state => $::service_state,
-      force => true
-    }
-  }
-  if ($daemon_cmd != undef) {
-    if ($name == 'datanode' and $ensure == 'running') {
-      exec { 'delete_pid_before_datanode_start':
-        command  => "rm -f ${pid_file}",
-        unless       => $service_is_up,
-        path => $hdp::params::exec_path
-      }
-    }
-    hdp::exec { $daemon_cmd:
-      command      => $daemon_cmd,
-      unless       => $service_is_up,
-      initial_wait => $initial_wait
-    }
-  }
-
-  anchor{"hdp-hadoop::service::${name}::begin":}
-  anchor{"hdp-hadoop::service::${name}::end":}
-  if ($daemon_cmd != undef) {
-    Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Exec[$daemon_cmd] -> Anchor["hdp-hadoop::service::${name}::end"]
-
-    if ($create_pid_dir == true) {
-      Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Directory_recursive_create[$pid_dir] -> Hdp::Exec[$daemon_cmd] 
-    }
-     if ($create_log_dir == true) {
-      Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Directory_recursive_create[$log_dir] -> Hdp::Exec[$daemon_cmd] 
-    }
-    if ($name == 'datanode' and $ensure == 'running') {
-      Anchor["hdp-hadoop::service::${name}::begin"] -> Exec['delete_pid_before_datanode_start'] -> Hdp::Exec[$daemon_cmd]
-    }
-  }
-  if ($ensure == 'running') {
-    #TODO: look at Puppet resource retry and retry_sleep
-    #TODO: can make sleep contingent on $name
-    $sleep = 5
-    $post_check = "sleep ${sleep}; ${service_is_up}"
-    hdp::exec { $post_check:
-      command => $post_check,
-      unless  => $service_is_up
-    }
-    Hdp::Exec[$daemon_cmd] -> Hdp::Exec[$post_check] -> Anchor["hdp-hadoop::service::${name}::end"]
-  }  
-}
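Note: the define above assembles both the daemon command and its liveness probe from strings. A minimal bash sketch of what ends up being executed for a non-secure namenode started as the hdfs user (the /usr/lib and /var/run paths are illustrative assumptions, not values taken from a live catalog):

    # $daemon_cmd: the export runs inside the su shell, then hadoop-daemon.sh starts the role
    su - hdfs -c 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'

    # $service_is_up: succeeds only if the pid file exists and the recorded process is alive
    pid_file=/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid
    ls "$pid_file" >/dev/null 2>&1 && ps `cat "$pid_file"` >/dev/null 2>&1

The `unless => $service_is_up` guard is what makes the start idempotent: the exec is skipped whenever the probe succeeds.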

+ 0 - 24
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp

@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::slave::jobtracker-conn($jobtracker_host)
-{
-  Hdp-Hadoop::Configfile<||>{jtnode_host => $jobtracker_host}
-}

+ 0 - 27
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp

@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::slave::master-conn($master_host)
-{
-  Hdp-Hadoop::Configfile<||>{
-    namenode_host => $master_host,
-    jtnode_host   => $master_host
-  }
-}

+ 0 - 27
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp

@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#TODO: this might be replaced by just using hdp::namenode-conn
-class hdp-hadoop::slave::namenode-conn($namenode_host)
-{
-  #TODO: check if we can get rid of both
-  Hdp-Hadoop::Configfile<||>{namenode_host => $namenode_host}
-  Hdp::Configfile<||>{namenode_host => $namenode_host} #for components other than hadoop (e.g., hbase) 
-}

+ 0 - 46
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp

@@ -1,46 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::smoketest(
-  $opts={}
-)
-{
-  #TODO: put in wait
-  #TODO: look for better way to compute outname
-  $date_format = '"%M%d%y"'
-  $outname = inline_template("<%=  `date +${date_format}`.chomp %>")
-
-  #TODO: hardwired to run on namenode and to use user hdfs
-
-  $put = "dfs -put /etc/passwd passwd-${outname}"
-  $exec = "jar /usr/share/hadoop/hadoop-examples-*.jar wordcount passwd-${outname} ${outname}.out"
-  $result = "fs -test -e ${outname}.out >/dev/null 2>&1"
-  anchor{ "hdp-hadoop::smoketest::begin" :} ->
-  hdp-hadoop::exec-hadoop{ $put:
-    command => $put
-  } ->
-  hdp-hadoop::exec-hadoop{ $exec:
-    command =>  $exec
-  } ->
-  hdp-hadoop::exec-hadoop{ $result:
-    command =>  $result
-  } ->
-  anchor{ "hdp-hadoop::smoketest::end" :}
-}
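Note: the class chains three hadoop invocations through hdp-hadoop::exec-hadoop. The equivalent shell session, as a sketch (the jar path and date format are carried over from the manifest verbatim; note that %M is minute-of-hour, not month):

    OUTNAME=$(date +"%M%d%y")
    hadoop dfs -put /etc/passwd passwd-$OUTNAME
    hadoop jar /usr/share/hadoop/hadoop-examples-*.jar wordcount passwd-$OUTNAME $OUTNAME.out
    hadoop fs -test -e $OUTNAME.out >/dev/null 2>&1 && echo "smoke test passed"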

+ 0 - 98
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp

@@ -1,98 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::snamenode(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params  
-{
-  $hdp::params::service_exists['hdp-hadoop::snamenode'] = true
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
-    $fs_checkpoint_dir = $hdp-hadoop::params::fs_checkpoint_dir
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      if ($hdp::params::service_exists['hdp-hadoop::namenode'] != true) {
-        $masterHost = $kerberos_adminclient_host[0]
-        hdp::download_keytab { 'snamenode_service_keytab' :
-          masterhost => $masterHost,
-          keytabdst => "${$keytab_path}/nn.service.keytab",
-          keytabfile => 'nn.service.keytab',
-          owner => $hdp-hadoop::params::hdfs_user
-        }
-        hdp::download_keytab { 'snamenode_spnego_keytab' :   
-          masterhost => $masterHost,
-          keytabdst => "${$keytab_path}/spnego.service.keytab",
-          keytabfile => 'spnego.service.keytab', 
-          owner => $hdp-hadoop::params::hdfs_user,
-          mode => '0440',
-          group => $hdp::params::user_group
-        }
-      }
-    }
- 
-    Hdp-Hadoop::Configfile<||>{snamenode_host => $hdp::params::host_address}
-  
-    hdp-hadoop::snamenode::create_name_dirs { $fs_checkpoint_dir: 
-      service_state => $service_state
-    }
-    
-    if ($hdp::params::service_exists['hdp-hadoop::namenode'] == true) {
-      $create_pid_dir = false
-      $create_log_dir = false
-    } else {
-      $create_pid_dir = true
-      $create_log_dir = true
-    }
-    
-    hdp-hadoop::service{ 'secondarynamenode':
-      ensure         => $service_state,
-      user           => $hdp-hadoop::params::hdfs_user,
-      create_pid_dir => $create_pid_dir,
-      create_log_dir => $create_log_dir
-    }
-  
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Snamenode::Create_name_dirs<||> ->
-      Hdp-hadoop::Service['secondarynamenode'] -> Anchor['hdp-hadoop::end']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::snamenode::create_name_dirs($service_state)
-{
-   $dirs = hdp_array_from_comma_list($name)
-   hdp::directory_recursive_create { $dirs :
-     owner => $hdp-hadoop::params::hdfs_user,
-     mode => '0755',
-     service_state => $service_state,
-     force => true
-  }
-}

+ 0 - 94
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp

@@ -1,94 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::tasktracker(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::tasktracker'] = true
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-
-  if ($hdp::params::use_32_bits_on_slaves == true) {
-    Hdp-hadoop::Package<||>{include_32_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 32}
-  } else {
-    Hdp-hadoop::Package<||>{include_64_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 64}
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-    $mapred_local_dir = $hdp-hadoop::params::mapred_local_dir
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'tasktracker_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/tt.service.keytab",
-        keytabfile => 'tt.service.keytab',
-        owner => $hdp-hadoop::params::mapred_user
-      }
-    }
-
-    hdp-hadoop::tasktracker::create_local_dirs { $mapred_local_dir:
-      service_state => $service_state
-    }
-    
-    if ($hdp::params::service_exists['hdp-hadoop::jobtracker'] == true) {
-      $create_pid_dir = false
-      $create_log_dir = false
-    } else {
-      $create_pid_dir = true
-      $create_log_dir = true
-    }
-
-    hdp-hadoop::service{ 'tasktracker':
-      ensure => $service_state,
-      user   => $hdp-hadoop::params::mapred_user,
-      create_pid_dir => $create_pid_dir,
-      create_log_dir => $create_log_dir
-    }
-  
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Tasktracker::Create_local_dirs<||> -> Hdp-hadoop::Service['tasktracker'] ->
-    Anchor['hdp-hadoop::end']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::tasktracker::create_local_dirs($service_state)
-{
-  if ($hdp::params::service_exists['hdp-hadoop::jobtracker'] != true) {
-    $dirs = hdp_array_from_comma_list($name)
-    hdp::directory_recursive_create_ignore_failure { $dirs :
-      owner => $hdp-hadoop::params::mapred_user,
-      mode => '0755',
-      service_state => $service_state,
-      force => true
-    }
-  }
-}

+ 0 - 51
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/zkfc.pp

@@ -1,51 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::zkfc(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params 
-{
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-  
-  $hdfs_user = $hdp-hadoop::params::hdfs_user
-  
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-    
-    hdp-hadoop::service{ 'zkfc':
-      ensure         => $service_state,
-      user           => $hdp-hadoop::params::hdfs_user,
-      create_pid_dir => true,
-      create_log_dir => true
-    }
-    
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Service['zkfc'] -> Anchor['hdp-hadoop::end']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

+ 0 - 25
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb

@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#Logging Implementation
-
-#Log4J
-org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
-
-#JDK Logger
-#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

+ 0 - 3
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb

@@ -1,3 +0,0 @@
-<% exlude_hosts_list.each do |val| -%>
-<%= val%>
-<% end -%>

+ 0 - 122
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb

@@ -1,122 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-export JAVA_HOME=<%=scope.function_hdp_template_var("::hdp::params::java64_home")%>
-export HADOOP_HOME_WARN_SUPPRESS=1
-
-# Hadoop Configuration Directory
-#TODO: if env var set that can cause problems
-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-<%=scope.function_hdp_template_var("conf_dir")%>}
-
-<% if scope.function_hdp_template_var("::hdp::params::isHadoop2Stack") == true %>
-# Path to jsvc required by secure HDP 2.0 datanode
-export JSVC_HOME=<%=scope.function_hdp_template_var("jsvc_path")%>
-<% end %>
-
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE="<%=scope.function_hdp_template_var("hadoop_heapsize")%>"
-
-export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms<%=scope.function_hdp_template_var("namenode_heapsize")%>"
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms<%=scope.function_hdp_template_var("namenode_heapsize")%> -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
-HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("jtnode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("jtnode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx<%=scope.function_hdp_template_var("jtnode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
-
-HADOOP_TASKTRACKER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("ttnode_heapsize")%> -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
-HADOOP_DATANODE_OPTS="-Xmx<%=scope.function_hdp_template_var("dtnode_heapsize")%> -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
-HADOOP_BALANCER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("hadoop_heapsize")%>m ${HADOOP_BALANCER_OPTS}"
-
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER=<%=scope.function_hdp_template_var("hdfs_user")%>
-
-# Extra ssh options.  Empty by default.
-export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER
-
-# History server logs
-export HADOOP_MAPRED_LOG_DIR=<%=scope.function_hdp_template_var("mapred_log_dir_prefix")%>/$USER
-
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$HADOOP_SECURE_DN_USER
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-export HADOOP_PID_DIR=<%=scope.function_hdp_template_var("hadoop_pid_dir_prefix")%>/$USER
-export HADOOP_SECURE_DN_PID_DIR=<%=scope.function_hdp_template_var("hadoop_pid_dir_prefix")%>/$HADOOP_SECURE_DN_USER
-
-# History server pid
-export HADOOP_MAPRED_PID_DIR=<%=scope.function_hdp_template_var("mapred_pid_dir_prefix")%>/$USER
-
-YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
-
-# A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-
-# export HADOOP_NICENESS=10
-
-# Use libraries from standard classpath
-JAVA_JDBC_LIBS=""
-#Add libraries required by mysql connector
-for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by oracle connector
-for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by nodemanager
-MAPREDUCE_LIBS=<%=scope.function_hdp_template_var("mapreduce_libs_path")%>
-export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
-
-# Setting path to hdfs command line
-export HADOOP_LIBEXEC_DIR=<%=scope.function_hdp_template_var("hadoop_libexec_dir")%>
-
-#Mostly required for hadoop 2.0
-export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
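Note: the connector-jar scan above iterates over `ls` output, which breaks on paths containing whitespace. A glob-based sketch of the same accumulation (same /usr/share/java locations, same result for well-behaved paths):

    JAVA_JDBC_LIBS=""
    for jarFile in /usr/share/java/*mysql* /usr/share/java/*ojdbc*; do
      # an unmatched glob stays literal, so confirm the file exists before appending
      [ -e "$jarFile" ] && JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
    done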

+ 0 - 45
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb

@@ -1,45 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-<%if not scope.function_hdp_is_empty(ganglia_server_host)%>
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
-datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-jobtracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
-tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-resourcemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8664
-nodemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-historyserver.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8666
-journalnode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-<% end %>

+ 0 - 45
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb

@@ -1,45 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-<%if not scope.function_hdp_is_empty(ganglia_server_host)%>
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
-datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-jobtracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
-tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-resourcemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8664
-nodemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-historyserver.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8666
-journalnode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-<% end %>

+ 0 - 17
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hdfs.conf.erb

@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-<%=scope.function_hdp_template_var("hdfs_user")%>   - nofile 32768
-<%=scope.function_hdp_template_var("hdfs_user")%>   - nproc  65536
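Note: this template renders per-user resource limits for the HDFS user, presumably installed under /etc/security/limits.d. A quick verification sketch (assumes the rendered user is hdfs and that PAM applies the limits to login shells):

    su - hdfs -c 'ulimit -n'   # expect 32768 (nofile)
    su - hdfs -c 'ulimit -u'   # expect 65536 (nproc)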

+ 0 - 91
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check-v2.erb

@@ -1,91 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_link {
-  snmp=/usr/bin/snmpwalk
-  if [ -e $snmp ] ; then
-    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
-    awk ' {
-      split($1,a,".") ;
-      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
-      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
-      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
-      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
-      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
-      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
-    }
-    END {
-      up=0;
-      for (i in ifIndex ) {
-      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
-      up=i;
-      }
-      }
-      if ( up == 0 ) { print "check link" ; exit 2 }
-      else { print ifDescr[up],"ok" }
-    }'
-    exit $? ;
-  fi
-}
-
-# Run all checks
-# Disabled 'check_link' for now... 
-for check in disks ; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0
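Note: the script always exits 0 at top level; the per-check exit codes are captured inside the command-substitution subshells, and failures surface only as an ERROR prefix on stdout. A consumption sketch (installed script path assumed):

    out=$(/etc/hadoop/conf/health_check)
    case "$out" in
      ERROR*) echo "host flagged: $out" ;;
      *)      echo "host healthy: $out" ;;
    esac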

+ 0 - 118
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb

@@ -1,118 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_taskcontroller {
-  if [ "<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>" == "true" ]; then
-    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
-    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
-      echo "taskcontroller ok"
-    else
-      echo 'check taskcontroller' ; exit 1
-    fi
-  fi
-}
-
-function check_jetty {
-  hname=`hostname`
-  jmx=`curl -s -S -m 5 "http://$hname:<%=scope.function_hdp_template_var("::hdp::tasktracker_port")%>/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
-  if [ $? -eq 0 ] ; then
-    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
-    e=${e:-0} # no jmx servlet ?
-    if [ $e -gt 10 ] ; then
-      echo "check jetty: shuffle_exceptions=$e" ; exit 1
-    else
-      echo "jetty ok"
-    fi
-  else
-    echo "check jetty: ping failed" ; exit 1
-  fi
-}
-
-function check_link {
-  snmp=/usr/bin/snmpwalk
-  if [ -e $snmp ] ; then
-    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
-    awk ' {
-      split($1,a,".") ;
-      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
-      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
-      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
-      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
-      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
-      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
-    }
-    END {
-      up=0;
-      for (i in ifIndex ) {
-      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
-      up=i;
-      }
-      }
-      if ( up == 0 ) { print "check link" ; exit 2 }
-      else { print ifDescr[up],"ok" }
-    }'
-    exit $? ;
-  fi
-}
-
-# Run all checks
-# Disabled 'check_link' for now... 
-for check in disks taskcontroller jetty; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0
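Note: this variant adds the taskcontroller and jetty probes on top of the v2 disk check. The taskcontroller probe demands an exact permission/ownership triple on the setuid binary; a sketch for inspecting it by hand (binary path assumed, since task_bin_exe is filled in at render time):

    stat -c %a:%U:%G /usr/lib/hadoop/bin/task-controller
    # the check passes only on exactly: 6050:root:hadoop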

+ 0 - 3
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/include_hosts_list.erb

@@ -1,3 +0,0 @@
-<% include_hosts_list.each do |val| -%>
-<%= val %>
-<% end -%>

+ 0 - 227
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb

@@ -1,227 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-# 
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-#Security audit appender
-#
-hadoop.security.logger=INFO,console
-hadoop.security.log.maxfilesize=256MB
-hadoop.security.log.maxbackupindex=20
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth.audit
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-#
-# hdfs audit logging
-#
-hdfs.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# mapred audit logging
-#
-mapred.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# Rolling File Appender
-#
-
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and number of backups kept
-log4j.appender.RFA.MaxFileSize=256MB
-log4j.appender.RFA.MaxBackupIndex=10
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-# Debugging pattern; uncomment to override the pattern above
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-<% if (scope.function_hdp_template_var("::hdp::params::is_jtnode_master") || scope.function_hdp_template_var("::hdp::params::is_rmnode_master"))%>
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
-<% if (scope.function_hdp_get_major_stack_version([scope.function_hdp_template_var("stack_version")]) >= 2)%>
-# Set the ResourceManager summary log filename
-yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
-# Set the ResourceManager summary log level and appender
-yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
-#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
-
-# To enable AppSummaryLogging for the RM,
-# set yarn.server.resourcemanager.appsummary.logger to
-# <LEVEL>,RMSUMMARY in hadoop-env.sh
-
-# Appender for ResourceManager Application Summary Log
-# Requires the following properties to be set
-#    - hadoop.log.dir (Hadoop Log directory)
-#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
-#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
-log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
-log4j.appender.RMSUMMARY.File=<%=scope.function_hdp_template_var("yarn_log_dir_prefix")%>/<%=scope.function_hdp_template_var("yarn_user")%>/${yarn.server.resourcemanager.appsummary.log.file}
-log4j.appender.RMSUMMARY.MaxFileSize=256MB
-log4j.appender.RMSUMMARY.MaxBackupIndex=20
-log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
-log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-<% else %>
-log4j.appender.JSA.File=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/<%=scope.function_hdp_template_var("mapred_user")%>/${hadoop.mapreduce.jobsummary.log.file}
-<%end-%>
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.JSA.DatePattern=.yyyy-MM-dd
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-<% if (scope.function_hdp_get_major_stack_version([scope.function_hdp_template_var("stack_version")]) >= 2)%>
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-<% else %>
-log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
-log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
-<%end-%>
-<%end-%>
-
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.database=<%=scope.function_hdp_host("ambari_db_rca_url")%>
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.driver=<%=scope.function_hdp_host("ambari_db_rca_driver")%>
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.user=<%=scope.function_hdp_host("ambari_db_rca_username")%>
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.password=<%=scope.function_hdp_host("ambari_db_rca_password")%>
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.logger=DEBUG,JHA
-
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.database=${ambari.jobhistory.database}
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.driver=${ambari.jobhistory.driver}
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.user=${ambari.jobhistory.user}
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.password=${ambari.jobhistory.password}
-
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=${ambari.jobhistory.logger}
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=true

+ 0 - 3
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb

@@ -1,3 +0,0 @@
-<%h=scope.function_hdp_host("slave_hosts"); (h.kind_of?(Array) ? h : []).each do |host|-%>
-<%= host %>
-<%end-%>

+ 0 - 20
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb

@@ -1,20 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-mapred.local.dir=<%=scope.function_hdp_template_var("mapred_local_dir")%>
-mapreduce.tasktracker.group=<%=scope.function_hdp_template_var("mapred_tt_group")%>
-hadoop.log.dir=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/<%=scope.function_hdp_template_var("mapred_user")%>

+ 0 - 26
ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh

@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-disable 'ambarismoketest'
-drop 'ambarismoketest'
-create 'ambarismoketest','family'
-put 'ambarismoketest','row01','family:col01','value1'
-scan 'ambarismoketest'
-exit

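These are HBase shell statements rather than a standalone script: the smoke test drops any leftover `ambarismoketest` table, recreates it, writes one cell, and scans it back. A file of this shape is consumed non-interactively via `hbase --config <conf_dir> shell <file>` (the pattern used by the service_check manifest later in this diff); a sketch of that invocation, assuming the smoke user `ambari-qa` and a hypothetical script path:

    su - ambari-qa -c 'hbase --config /etc/hbase/conf shell /tmp/hbaseSmoke.sh'
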
+ 0 - 32
ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmokeVerify.sh

@@ -1,32 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-conf_dir=$1
-data=$2
-echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > /tmp/hbase_chk_verify
-cat /tmp/hbase_chk_verify
-echo "Looking for $data"
-grep -q $data /tmp/hbase_chk_verify
-if [ "$?" -ne 0 ]
-then
-  exit 1
-fi
-
-grep -q '1 row(s)' /tmp/hbase_chk_verify

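Two exit paths matter here: the script exits 1 if `$data` is absent from the scan output, and otherwise its status is that of the final grep, which checks that the shell reported `1 row(s)`. Note that `$data` is expanded unquoted, so multi-word values would break the grep. A usage sketch with hypothetical arguments:

    /tmp/hbaseSmokeVerify.sh /etc/hbase/conf value1
    echo $?    # 0 only when value1 appears and the scan reports 1 row(s)
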
+ 0 - 51
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp

@@ -1,51 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::client(
-  $service_state = $hdp::params::cluster_client_state,
-  $opts = {}
-)
-{
-  include hdp-hbase::params
-  $hbase_tmp_dir = $hdp-hbase::params::hbase_tmp_dir
-
-  #assumption is there are no other hbase components on node
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['installed_and_configured','uninstalled']) {
-    if (($hdp::params::service_exists['hdp-hbase::master'] != true) and ($hdp::params::service_exists['hdp-hbase::regionserver'] != true)) {
-      #adds package, users, directories, and common configs
-      class { 'hdp-hbase': 
-        type          => 'client',
-        service_state => $service_state
-      }
-
-      hdp::directory_recursive_create_ignore_failure { "${hbase_tmp_dir}/local/jars":
-        owner => $hdp-hbase::params::hbase_user,
-        context_tag => 'hbase_client',
-        service_state => $service_state,
-        force => true
-      }
-
-      Class[ 'hdp-hbase' ] -> Hdp::Directory_recursive_create_ignore_failure<||>
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

+ 0 - 113
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp

@@ -1,113 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::hbase::service_check() inherits hdp-hbase::params
-{
-  $smoke_test_user = $hdp::params::smokeuser
-  $security_enabled = $hdp::params::security_enabled
-  $conf_dir = $hdp::params::hbase_conf_dir
-  $smoke_user_keytab = $hdp::params::smokeuser_keytab
-  $hbase_user = $hdp-hbase::params::hbase_user
-  $hbase_keytab = $hdp::params::hbase_user_keytab
-  $serviceCheckData = hdp_unique_id_and_date()
-  $kinit_cmd = "${hdp::params::kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user};"
-
-  anchor { 'hdp-hbase::hbase::service_check::begin':}
-
-  if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2){
-    $output_file = "${hbase_hdfs_root_dir}/data/default/ambarismoketest"
-  } else {
-    $output_file = "${hbase_hdfs_root_dir}/ambarismoketest"
-  }
-
-  $test_cmd = "fs -test -e ${output_file}"
-
-  $hbase_servicecheck_file = '/tmp/hbase-smoke.sh'
-
-  file { '/tmp/hbaseSmokeVerify.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-hbase/hbaseSmokeVerify.sh",
-    mode => '0755',
-  }
-
-  file { $hbase_servicecheck_file:
-    mode => '0755',
-    content => template('hdp-hbase/hbase-smoke.sh.erb'),
-  }
-  if ($security_enabled == true) {
-    $servicecheckcmd = "su - ${smoke_test_user} -c '$kinit_cmd hbase --config $conf_dir  shell $hbase_servicecheck_file'"
-    $smokeverifycmd = "su - ${smoke_test_user} -c '$kinit_cmd /tmp/hbaseSmokeVerify.sh $conf_dir ${serviceCheckData}'"
-  } else {
-    $servicecheckcmd = "su - ${smoke_test_user} -c 'hbase --config $conf_dir  shell $hbase_servicecheck_file'"
-    $smokeverifycmd = "su - ${smoke_test_user} -c '/tmp/hbaseSmokeVerify.sh $conf_dir ${serviceCheckData}'"
-  }
-
-  exec { $hbase_servicecheck_file:
-    command   => $servicecheckcmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-
-  exec { '/tmp/hbaseSmokeVerify.sh':
-    command   => $smokeverifycmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    notify    => Hdp-hadoop::Exec-hadoop['hbase::service_check::test'],
-    logoutput => "true"
-  }
-
-  hdp-hadoop::exec-hadoop { 'hbase::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    require     => Exec['/tmp/hbaseSmokeVerify.sh'],
-    before      => Anchor['hdp-hbase::hbase::service_check::end'] #TODO: remove after testing
-  }
-
-  if ($security_enabled == true) {
-    $hbase_grant_premissions_file = '/tmp/hbase_grant_permissions.sh'
-    $hbase_kinit_cmd = "${hdp::params::kinit_path_local} -kt ${hbase_keytab} ${hbase_user};"
-    $grantprivelegecmd = "$hbase_kinit_cmd hbase shell ${hbase_grant_premissions_file}"
-
-    file { $hbase_grant_premissions_file:
-      owner   => $hbase_user,
-      group   => $hdp::params::user_group,
-      mode => '0644',
-      content => template('hdp-hbase/hbase_grant_permissions.erb')
-      }
-      hdp-hadoop::exec-hadoop { '${smokeuser}_grant_privileges' :
-        command => $grantprivelegecmd,
-        require => File[$hbase_grant_premissions_file],
-        user => $hbase_user
-      }
-     Anchor['hdp-hbase::hbase::service_check::begin'] ->  File['/tmp/hbaseSmokeVerify.sh']
-       File[$hbase_servicecheck_file] ->  File[$hbase_grant_premissions_file] ->
-       Hdp-hadoop::Exec-hadoop['${smokeuser}_grant_privileges'] ->
-       Exec[$hbase_servicecheck_file] ->
-       Exec['/tmp/hbaseSmokeVerify.sh'] -> Anchor['hdp-hbase::hbase::service_check::end']
-  } else {
-    Anchor['hdp-hbase::hbase::service_check::begin'] ->  File['/tmp/hbaseSmokeVerify.sh']
-    File[$hbase_servicecheck_file] -> Exec[$hbase_servicecheck_file] -> Exec['/tmp/hbaseSmokeVerify.sh']
-    -> Anchor['hdp-hbase::hbase::service_check::end']
-  }
-  anchor{ 'hdp-hbase::hbase::service_check::end':}
-}

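The secured and unsecured variants of the check differ only in the `kinit` prepended inside the `su` shell. A sketch of the secured smoke command as it would expand, assuming the stock smoke user `ambari-qa`, a keytab at `/etc/security/keytabs/smokeuser.headless.keytab`, and `kinit` at `/usr/bin/kinit`:

    su - ambari-qa -c '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh'
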
+ 0 - 155
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp

@@ -1,155 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase(
-  $type,
-  $service_state) 
-{
-  include hdp-hbase::params
- 
-  $hbase_user = $hdp-hbase::params::hbase_user
-  $config_dir = $hdp-hbase::params::conf_dir
-  
-  $hdp::params::component_exists['hdp-hbase'] = true
-  $smokeuser = $hdp::params::smokeuser
-  $security_enabled = $hdp::params::security_enabled
-
-  #Configs generation  
-
-  if has_key($configuration, 'hbase-site') {
-    configgenerator::configfile{'hbase-site': 
-      modulespath => $hdp-hbase::params::conf_dir,
-      filename => 'hbase-site.xml',
-      module => 'hdp-hbase',
-      configuration => $configuration['hbase-site'],
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hbase::params::conf_dir}/hbase-site.xml":
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'hdfs-site') {
-    configgenerator::configfile{'hdfs-site':
-      modulespath => $hdp-hbase::params::conf_dir,
-      filename => 'hdfs-site.xml',
-      module => 'hdp-hbase',
-      configuration => $configuration['hdfs-site'],
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hbase::params::conf_dir}/hdfs-site.xml":
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'hbase-policy') {
-    configgenerator::configfile{'hbase-policy': 
-      modulespath => $hdp-hbase::params::conf_dir,
-      filename => 'hbase-policy.xml',
-      module => 'hdp-hbase',
-      configuration => $configuration['hbase-policy'],
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hbase::params::conf_dir}/hbase-policy.xml":
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  anchor{'hdp-hbase::begin':}
-  anchor{'hdp-hbase::end':}
-
-  if ($service_state == 'uninstalled') {
-    hdp::package { 'hbase':
-      ensure => 'uninstalled'
-    }
-    hdp::directory { $config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    Anchor['hdp-hbase::begin'] -> Hdp::Package['hbase'] -> Hdp::Directory[$config_dir] -> Anchor['hdp-hbase::end']
-
-  } else {  
-    hdp::package { 'hbase': }
-  
-    hdp::directory { $config_dir: 
-      service_state => $service_state,
-      force => true,
-      owner => $hbase_user,
-      group => $hdp::params::user_group,
-      override_owner => true
-    }
-
-   hdp-hbase::configfile { ['hbase-env.sh',  $hdp-hbase::params::metric-prop-file-name ]: 
-      type => $type
-    }
-
-    hdp-hbase::configfile { 'regionservers':}
-
-    if ($security_enabled == true) {
-      if ($type == 'master' and $service_state == 'running') {
-        hdp-hbase::configfile { 'hbase_master_jaas.conf' : }
-      } elsif ($type == 'regionserver' and $service_state == 'running') {
-        hdp-hbase::configfile { 'hbase_regionserver_jaas.conf' : }
-      } elsif ($type == 'client') {
-        hdp-hbase::configfile { 'hbase_client_jaas.conf' : }
-      }
-    }
-    Anchor['hdp-hbase::begin'] -> Hdp::Package['hbase'] -> Hdp::Directory[$config_dir] ->
-    Hdp-hbase::Configfile<||> ->  Anchor['hdp-hbase::end']
-  }
-}
-
-### config files
-define hdp-hbase::configfile(
-  $mode = undef,
-  $hbase_master_hosts = undef,
-  $template_tag = undef,
-  $type = undef,
-  $conf_dir = $hdp-hbase::params::conf_dir
-) 
-{
-  if ($name == $hdp-hbase::params::metric-prop-file-name) {
-    if ($type == 'master') {
-      $tag = GANGLIA-MASTER
-    } else {
-      $tag = GANGLIA-RS
-    }
-  } else {
-    $tag = $template_tag
-  }
-
-  hdp::configfile { "${conf_dir}/${name}":
-    component         => 'hbase',
-    owner             => $hdp-hbase::params::hbase_user,
-    mode              => $mode,
-    hbase_master_hosts => $hbase_master_hosts,
-    template_tag      => $tag
-  }
-}

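When the `$configuration` hash carries a section, `configgenerator::configfile` (removed earlier in this commit) renders it as a Hadoop-style XML properties file; otherwise the manifest only re-owns the copy the package installed. A sketch of the generated shape, with a hypothetical property:

    <configuration>
      <property>
        <name>hbase.rootdir</name>
        <value>hdfs://namenode.example.com:8020/apps/hbase</value>
      </property>
    </configuration>
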
+ 0 - 24
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master-conn.pp

@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::master-conn($hbase_master_hosts)
-{
-  Hdp-Hbase::Configfile<||>{hbase_master_hosts => $hbase_master_hosts}
-}

+ 0 - 66
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master.pp

@@ -1,66 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::master(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hbase::params 
-{
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {    
-    $hdp::params::service_exists['hdp-hbase::master'] = true
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-       $masterHost = $kerberos_adminclient_host[0]
-       hdp::download_keytab { 'hbase_master_service_keytab' :
-         masterhost => $masterHost,
-         keytabdst => "${$keytab_path}/hm.service.keytab",
-         keytabfile => 'hm.service.keytab',
-         owner => $hdp::params::hbase_user
-       }
-    }
-  
-    #adds package, users, directories, and common configs
-    class { 'hdp-hbase': 
-      type          => 'master',
-      service_state => $service_state
-    }
-
-    Hdp-hbase::Configfile<||>{hbase_master_hosts => $hdp::params::host_address}
-  
-    hdp-hbase::service{ 'master':
-      ensure => $service_state
-    }
-
-    #top level does not need anchors
-    Class['hdp-hbase'] -> Hdp-hbase::Service['master'] 
-    } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-#assumes that master and regionserver will not be on same machine
-class hdp-hbase::master::enable-ganglia()
-{
-  Hdp-hbase::Configfile<|title  == $metric-prop-file-name |>{template_tag => 'GANGLIA-MASTER'}
-}
-

Some files were not shown because the diff is too large