
(vbrodetskyi) - revert wrong commit

Vitaly Brodetskyi, 11 years ago
parent
commit c1441e05fe

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaHostPropertyProvider.java

@@ -39,7 +39,7 @@ public class GangliaHostPropertyProvider extends GangliaPropertyProvider{
 
   static {
     GANGLIA_CLUSTER_NAMES.add("HDPNameNode");
-    GANGLIA_CLUSTER_NAMES.add("HostMetrics");
+    GANGLIA_CLUSTER_NAMES.add("HDPSlaves");
     GANGLIA_CLUSTER_NAMES.add("HDPJobTracker");
     GANGLIA_CLUSTER_NAMES.add("HDPResourceManager");
     GANGLIA_CLUSTER_NAMES.add("HDPHBaseMaster");

+ 6 - 6
ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProvider.java

@@ -59,16 +59,16 @@ public abstract class GangliaPropertyProvider extends AbstractPropertyProvider {
   
   static {
     GANGLIA_CLUSTER_NAME_MAP.put("NAMENODE",           Arrays.asList("HDPNameNode"));
-    GANGLIA_CLUSTER_NAME_MAP.put("DATANODE",           Arrays.asList("HDPDataNode", "HostMetrics"));
+    GANGLIA_CLUSTER_NAME_MAP.put("DATANODE",           Arrays.asList("HDPDataNode", "HDPSlaves"));
     GANGLIA_CLUSTER_NAME_MAP.put("JOBTRACKER",         Arrays.asList("HDPJobTracker"));
-    GANGLIA_CLUSTER_NAME_MAP.put("TASKTRACKER",        Arrays.asList("HDPTaskTracker", "HostMetrics"));
+    GANGLIA_CLUSTER_NAME_MAP.put("TASKTRACKER",        Arrays.asList("HDPTaskTracker", "HDPSlaves"));
     GANGLIA_CLUSTER_NAME_MAP.put("RESOURCEMANAGER",    Arrays.asList("HDPResourceManager"));
-    GANGLIA_CLUSTER_NAME_MAP.put("NODEMANAGER",        Arrays.asList("HDPNodeManager", "HostMetrics"));
+    GANGLIA_CLUSTER_NAME_MAP.put("NODEMANAGER",        Arrays.asList("HDPNodeManager", "HDPSlaves"));
     GANGLIA_CLUSTER_NAME_MAP.put("HISTORYSERVER",      Arrays.asList("HDPHistoryServer"));
     GANGLIA_CLUSTER_NAME_MAP.put("HBASE_MASTER",       Arrays.asList("HDPHBaseMaster"));
-    GANGLIA_CLUSTER_NAME_MAP.put("HBASE_REGIONSERVER", Arrays.asList("HDPHBaseRegionServer", "HostMetrics"));
-    GANGLIA_CLUSTER_NAME_MAP.put("FLUME_SERVER",       Arrays.asList("HDPFlumeServer", "HostMetrics"));
-    GANGLIA_CLUSTER_NAME_MAP.put("JOURNALNODE",        Arrays.asList("HDPJournalNode", "HostMetrics"));
+    GANGLIA_CLUSTER_NAME_MAP.put("HBASE_REGIONSERVER", Arrays.asList("HDPHBaseRegionServer", "HDPSlaves"));
+    GANGLIA_CLUSTER_NAME_MAP.put("FLUME_SERVER",       Arrays.asList("HDPFlumeServer", "HDPSlaves"));
+    GANGLIA_CLUSTER_NAME_MAP.put("JOURNALNODE",        Arrays.asList("HDPJournalNode", "HDPSlaves"));
     GANGLIA_CLUSTER_NAME_MAP.put("NIMBUS",             Arrays.asList("HDPNimbus"));
     GANGLIA_CLUSTER_NAME_MAP.put("SUPERVISOR",         Arrays.asList("HDPSupervisor"));
   }

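The two Java changes restore "HDPSlaves" as the catch-all Ganglia cluster for slave components: GANGLIA_CLUSTER_NAMES lists the clusters the host-level provider may query, and GANGLIA_CLUSTER_NAME_MAP pairs each component with the clusters whose rrd data is fetched. Those cluster names end up in the `c=` parameter of the `/cgi-bin/rrd.py` request, as the test expectations further down show. Below is a minimal Python sketch of that URL construction; it is illustrative only (the real logic is the Java class above), and the `build_rrd_query` helper is hypothetical.

```python
from urllib.parse import urlencode

# Hypothetical, simplified model of how the component-to-cluster map feeds the
# rrd.py query; the real logic lives in GangliaPropertyProvider.java.
GANGLIA_CLUSTER_NAME_MAP = {
    "DATANODE":     ["HDPDataNode", "HDPSlaves"],
    "TASKTRACKER":  ["HDPTaskTracker", "HDPSlaves"],
    "FLUME_SERVER": ["HDPFlumeServer", "HDPSlaves"],
}

def build_rrd_query(ganglia_server, component, host, metric, start, end, resolution):
    """Build an rrd.py URL of the shape asserted in GangliaPropertyProviderTest."""
    params = {
        "c": ",".join(GANGLIA_CLUSTER_NAME_MAP[component]),  # e.g. HDPDataNode,HDPSlaves
        "h": host,
        "m": metric,
        "s": start,
        "e": end,
        "r": resolution,
    }
    return "http://%s/cgi-bin/rrd.py?%s" % (ganglia_server, urlencode(params))

# Example: matches the shape of the expected URL in the first test hunk.
print(build_rrd_query("domU-12-31-39-0E-34-E1.compute-1.internal", "DATANODE",
                      "domU-12-31-39-0E-34-E1.compute-1.internal",
                      "jvm.metrics.gcCount", 10, 20, 1))
```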
+ 119 - 6
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_monitor.py

@@ -100,18 +100,131 @@ class GangliaMonitor(Script):
   def generate_slave_configs(self):
     import params
 
-    generate_daemon("gmond",
-                    name = "HostMetrics",
-                    role = "monitor",
-                    owner = "root",
-                    group = params.user_group)
+    if params.is_namenode_master:
+      generate_daemon("gmond",
+                      name = "HDPNameNode",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_jtnode_master:
+      generate_daemon("gmond",
+                      name = "HDPJobTracker",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hsnode_master:
+      generate_daemon("gmond",
+                      name = "HDPHistoryServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hbase_master:
+      generate_daemon("gmond",
+                      name = "HDPHBaseMaster",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_slave:
+      generate_daemon("gmond",
+                      name = "HDPDataNode",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_tasktracker:
+      generate_daemon("gmond",
+                      name = "HDPTaskTracker",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hbase_rs:
+      generate_daemon("gmond",
+                      name = "HDPHBaseRegionServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_flume:
+      generate_daemon("gmond",
+                      name = "HDPFlumeServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.pure_slave:
+      generate_daemon("gmond",
+                      name = "HDPSlaves",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
 
 
   def generate_master_configs(self):
     import params
 
+    if params.has_namenodes:
+      generate_daemon("gmond",
+                      name = "HDPNameNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_jobtracker:
+      generate_daemon("gmond",
+                      name = "HDPJobTracker",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_hbase_masters:
+      generate_daemon("gmond",
+                      name = "HDPHBaseMaster",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_historyserver:
+      generate_daemon("gmond",
+                      name = "HDPHistoryServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_slaves:
+      generate_daemon("gmond",
+                      name = "HDPDataNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_tasktracker:
+      generate_daemon("gmond",
+                      name = "HDPTaskTracker",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_hbase_rs:
+      generate_daemon("gmond",
+                      name = "HDPHBaseRegionServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_flume:
+      generate_daemon("gmond",
+                      name = "HDPFlumeServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
     generate_daemon("gmond",
-                    name = "HostMetrics",
+                    name = "HDPSlaves",
                     role = "server",
                     owner = "root",
                     group = params.user_group)

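With the revert, the monitor script generates a gmond config per Ganglia cluster that is actually relevant to the host: on slaves, one "monitor" config per role present on the host plus the HDPSlaves catch-all; on the Ganglia server, one "server" config per cluster that has hosts. The repeated if-blocks above all follow one pattern, so a condensed, data-driven equivalent is sketched below; it is hypothetical (the shipped script keeps the explicit blocks, and `params`/`generate_daemon` come from Ambari's resource-management code).

```python
# Hypothetical data-driven restatement of the if-blocks above (illustrative only).
ROLE_TO_CLUSTER = [
    ("is_namenode_master", "HDPNameNode"),
    ("is_jtnode_master",   "HDPJobTracker"),
    ("is_hsnode_master",   "HDPHistoryServer"),
    ("is_hbase_master",    "HDPHBaseMaster"),
    ("is_slave",           "HDPDataNode"),
    ("is_tasktracker",     "HDPTaskTracker"),
    ("is_hbase_rs",        "HDPHBaseRegionServer"),
    ("is_flume",           "HDPFlumeServer"),
    ("pure_slave",         "HDPSlaves"),
]

def generate_slave_configs(params, generate_daemon):
    for flag, cluster in ROLE_TO_CLUSTER:
        if getattr(params, flag):
            generate_daemon("gmond",
                            name=cluster,
                            role="monitor",
                            owner="root",
                            group=params.user_group)
```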
+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/templates/gangliaClusters.conf.j2

@@ -45,7 +45,7 @@
     HDPNodeManager     	{{ganglia_server_host}}   8657
     HDPTaskTracker     	{{ganglia_server_host}}   8658
     HDPDataNode       	{{ganglia_server_host}}   8659
-    HostMetrics       	{{ganglia_server_host}}   8660
+    HDPSlaves       	{{ganglia_server_host}}   8660
     HDPNameNode         {{ganglia_server_host}}   8661
     HDPJobTracker     	{{ganglia_server_host}}  8662
     HDPHBaseMaster      {{ganglia_server_host}}   8663

+ 6 - 2
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-services.cfg.j2

@@ -189,8 +189,9 @@ define service {
 {% endif %}
 
 {% if hostgroup_defs['resourcemanager'] %}
+{% for hostname in hostgroup_defs['resourcemanager'] %}
 define service {
-        hostgroup_name          ganglia-server
+        host_name	        {{ hostname }}
         use                     hadoop-service
         service_description     GANGLIA::Ganglia Monitor process for ResourceManager
         servicegroups           GANGLIA
@@ -199,11 +200,13 @@ define service {
         retry_check_interval    0.25
         max_check_attempts      4
 }
+{% endfor %}
 {% endif %}
 
 {% if hostgroup_defs['historyserver2'] %}
+{% for hostname in hostgroup_defs['historyserver2'] %}
 define service {
-        hostgroup_name          ganglia-server
+        host_name	        {{ hostname }}
         use                     hadoop-service
         service_description     GANGLIA::Ganglia Monitor process for HistoryServer
         servicegroups           GANGLIA
@@ -212,6 +215,7 @@ define service {
         retry_check_interval    0.25
         max_check_attempts      4
 }
+{% endfor %}
 {% endif %}
 
 {% endif %}

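Previously the ResourceManager and HistoryServer "Ganglia Monitor process" checks were attached to the ganglia-server hostgroup; the revert declares one check per host that actually runs the component, by looping over `hostgroup_defs['resourcemanager']` and `hostgroup_defs['historyserver2']`. The sketch below shows how such a loop expands, rendered with the jinja2 library outside Nagios/Ambari; the template string is a trimmed stand-in, not the shipped file.

```python
from jinja2 import Template

# Trimmed stand-in for the hadoop-services.cfg.j2 fragment changed above.
template = Template("""
{% if hostgroup_defs['resourcemanager'] %}
{% for hostname in hostgroup_defs['resourcemanager'] %}
define service {
        host_name               {{ hostname }}
        use                     hadoop-service
        service_description     GANGLIA::Ganglia Monitor process for ResourceManager
}
{% endfor %}
{% endif %}
""")

# One "define service" block is emitted per ResourceManager host.
print(template.render(hostgroup_defs={"resourcemanager": ["rm-host-1", "rm-host-2"]}))
```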
+ 85 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py

@@ -100,12 +100,95 @@ class GangliaMonitor(Script):
   def generate_slave_configs(self):
     import params
 
+    if params.is_namenode_master:
+      generate_daemon("gmond",
+                      name = "HDPNameNode",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_jtnode_master:
+      generate_daemon("gmond",
+                      name = "HDPJobTracker",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_rmnode_master:
+      generate_daemon("gmond",
+                      name = "HDPResourceManager",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hsnode_master:
+      generate_daemon("gmond",
+                      name = "HDPHistoryServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_nimbus_host:
+      generate_daemon("gmond",
+                      name = "HDPNimbus",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+    if params.is_supervisor_host:
+      generate_daemon("gmond",
+                      name = "HDPSupervisor",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hbase_master:
+      generate_daemon("gmond",
+                      name = "HDPHBaseMaster",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
     generate_daemon("gmond",
-                    name = "HostMetrics",
+                    name = "HDPSlaves",
                     role = "monitor",
                     owner = "root",
                     group = params.user_group)
 
+    if params.is_slave:
+      generate_daemon("gmond",
+                      name = "HDPDataNode",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_tasktracker:
+      generate_daemon("gmond",
+                      name = "HDPTaskTracker",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hbase_rs:
+      generate_daemon("gmond",
+                      name = "HDPHBaseRegionServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_flume:
+      generate_daemon("gmond",
+                      name = "HDPFlumeServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_jn_host:
+      generate_daemon("gmond",
+                      name = "HDPJournalNode",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
 
   def generate_master_configs(self):
     import params
@@ -202,7 +285,7 @@ class GangliaMonitor(Script):
                       group = params.user_group)
 
     generate_daemon("gmond",
-                    name = "HostMetrics",
+                    name = "HDPSlaves",
                     role = "server",
                     owner = "root",
                     group = params.user_group)

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaClusters.conf.j2

@@ -44,7 +44,7 @@
     HDPNodeManager     	{{ganglia_server_host}}   8657
     HDPTaskTracker     	{{ganglia_server_host}}   8658
     HDPDataNode       	{{ganglia_server_host}}   8659
-    HostMetrics       	{{ganglia_server_host}}   8660
+    HDPSlaves       	{{ganglia_server_host}}   8660
     HDPNameNode         {{ganglia_server_host}}   8661
     HDPJobTracker     	{{ganglia_server_host}}   8662
     HDPHBaseMaster      {{ganglia_server_host}}   8663

+ 4 - 4
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2

@@ -51,21 +51,21 @@ hbase.extendedperiod = 3600
 # hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
 hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
 hbase.period=10
-hbase.servers={{ganglia_server_host}}:8656
+hbase.servers={{ganglia_server_host}}:8660
 
 # Configuration of the "jvm" context for ganglia
 # Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
 # jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
 jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
 jvm.period=10
-jvm.servers={{ganglia_server_host}}:8656
+jvm.servers={{ganglia_server_host}}:8660
 
 # Configuration of the "rpc" context for ganglia
 # Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
 # rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
 rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
 rpc.period=10
-rpc.servers={{ganglia_server_host}}:8656
+rpc.servers={{ganglia_server_host}}:8660
 
 #Ganglia following hadoop example
 hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
@@ -77,4 +77,4 @@ hbase.sink.ganglia.period=10
 .sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
 .sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
 
-hbase.sink.ganglia.servers={{ganglia_server_host}}:8656
+hbase.sink.ganglia.servers={{ganglia_server_host}}:8660

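The HBase RegionServer metrics sinks are repointed from port 8656 to 8660, which, per the gangliaClusters.conf.j2 mapping above, is the gmond port of the restored HDPSlaves cluster on the Ganglia server. Below is a small hedged sanity-check sketch: it parses gangliaClusters.conf-style lines into a name-to-port map so sink ports can be compared against it. The check and the example hostname are assumptions for illustration, not part of Ambari.

```python
import re

def parse_ganglia_clusters(text):
    """Parse 'ClusterName  host  port' lines from a rendered gangliaClusters.conf."""
    ports = {}
    for line in text.splitlines():
        m = re.match(r"\s*(\S+)\s+\S+\s+(\d+)\s*$", line)
        if m:
            ports[m.group(1)] = int(m.group(2))
    return ports

rendered = """
    HDPDataNode        ganglia.example.com   8659
    HDPSlaves          ganglia.example.com   8660
    HDPNameNode        ganglia.example.com   8661
"""
ports = parse_ganglia_clusters(rendered)
assert ports["HDPSlaves"] == 8660  # the port the HBase RS sinks now target
```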
+ 6 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-services.cfg.j2

@@ -204,8 +204,9 @@ define service {
 {% endif %}
 
 {% if hostgroup_defs['resourcemanager'] %}
+{% for hostname in hostgroup_defs['resourcemanager'] %}
 define service {
-        hostgroup_name          ganglia-server
+        host_name	        {{ hostname }}
         use                     hadoop-service
         service_description     GANGLIA::Ganglia Monitor process for ResourceManager
         servicegroups           GANGLIA
@@ -214,11 +215,13 @@ define service {
         retry_check_interval    0.25
         max_check_attempts      4
 }
+{% endfor %}
 {% endif %}
 
 {% if hostgroup_defs['historyserver2'] %}
+{% for hostname in hostgroup_defs['historyserver2'] %}
 define service {
-        hostgroup_name          ganglia-server
+        host_name	        {{ hostname }}
         use                     hadoop-service
         service_description     GANGLIA::Ganglia Monitor process for HistoryServer
         servicegroups           GANGLIA
@@ -227,6 +230,7 @@ define service {
         retry_check_interval    0.25
         max_check_attempts      4
 }
+{% endfor %}
 {% endif %}
 
 {% endif %}

+ 11 - 11
ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProviderTest.java

@@ -125,7 +125,7 @@ public class GangliaPropertyProviderTest {
 
 
     String expected = (configuration.isGangliaSSL() ? "https" : "http") +
-        "://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPDataNode%2CHostMetrics&h=domU-12-31-39-0E-34-E1.compute-1.internal&m=jvm.metrics.gcCount&s=10&e=20&r=1";
+        "://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPDataNode%2CHDPSlaves&h=domU-12-31-39-0E-34-E1.compute-1.internal&m=jvm.metrics.gcCount&s=10&e=20&r=1";
     Assert.assertEquals(expected, streamProvider.getLastSpec());
 
     Assert.assertEquals(3, PropertyHelper.getProperties(resource).size());
@@ -175,7 +175,7 @@ public class GangliaPropertyProviderTest {
     expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
-    expectedUri.setParameter("c", "HDPTaskTracker,HostMetrics");
+    expectedUri.setParameter("c", "HDPTaskTracker,HDPSlaves");
     expectedUri.setParameter("h", "domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setParameter("m", metricsList);
     expectedUri.setParameter("s", "10");
@@ -323,7 +323,7 @@ public class GangliaPropertyProviderTest {
     uriBuilder.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
     uriBuilder.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     uriBuilder.setPath("/cgi-bin/rrd.py");
-    uriBuilder.setParameter("c", "HDPJobTracker,HDPHBaseMaster,HDPResourceManager,HDPFlumeServer,HostMetrics,HDPHistoryServer,HDPJournalNode,HDPTaskTracker,HDPHBaseRegionServer,HDPNameNode");
+    uriBuilder.setParameter("c", "HDPJobTracker,HDPHBaseMaster,HDPResourceManager,HDPFlumeServer,HDPSlaves,HDPHistoryServer,HDPJournalNode,HDPTaskTracker,HDPHBaseRegionServer,HDPNameNode");
     uriBuilder.setParameter("h", "domU-12-31-39-0E-34-E3.compute-1.internal,domU-12-31-39-0E-34-E1.compute-1.internal,domU-12-31-39-0E-34-E2.compute-1.internal");
     uriBuilder.setParameter("m", "jvm.metrics.gcCount");
     uriBuilder.setParameter("s", "10");
@@ -382,7 +382,7 @@ public class GangliaPropertyProviderTest {
     expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
-    expectedUri.setParameter("c", "HDPJobTracker,HDPHBaseMaster,HDPResourceManager,HDPFlumeServer,HostMetrics,HDPHistoryServer,HDPJournalNode,HDPTaskTracker,HDPHBaseRegionServer,HDPNameNode");
+    expectedUri.setParameter("c", "HDPJobTracker,HDPHBaseMaster,HDPResourceManager,HDPFlumeServer,HDPSlaves,HDPHistoryServer,HDPJournalNode,HDPTaskTracker,HDPHBaseRegionServer,HDPNameNode");
    
     expectedUri.setParameter("h", hostsList.toString());
     expectedUri.setParameter("m", "jvm.metrics.gcCount");
@@ -438,7 +438,7 @@ public class GangliaPropertyProviderTest {
     expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
-    expectedUri.setParameter("c", "HDPFlumeServer,HostMetrics");
+    expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");
     expectedUri.setParameter("h", "ip-10-39-113-33.ec2.internal");
     expectedUri.setParameter("m", metricsList);
     expectedUri.setParameter("s", "10");
@@ -501,7 +501,7 @@ public class GangliaPropertyProviderTest {
     expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
-    expectedUri.setParameter("c", "HDPFlumeServer,HostMetrics");
+    expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");
     expectedUri.setParameter("h", "ip-10-39-113-33.ec2.internal");
     expectedUri.setParameter("m", metricsList);
     expectedUri.setParameter("e", "now");
@@ -546,7 +546,7 @@ public class GangliaPropertyProviderTest {
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
     String expected = (configuration.isGangliaSSL() ? "https" : "http") +
-        "://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPFlumeServer%2CHostMetrics&h=ip-10-39-113-33.ec2.internal&m=";
+        "://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPFlumeServer%2CHDPSlaves&h=ip-10-39-113-33.ec2.internal&m=";
     
     Assert.assertTrue(streamProvider.getLastSpec().startsWith(expected));
 
@@ -593,7 +593,7 @@ public class GangliaPropertyProviderTest {
     expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
-    expectedUri.setParameter("c", "HDPFlumeServer,HostMetrics");
+    expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");
     expectedUri.setParameter("h", "ip-10-39-113-33.ec2.internal");
     expectedUri.setParameter("m", metricsList);
     expectedUri.setParameter("s", "10");
@@ -651,7 +651,7 @@ public class GangliaPropertyProviderTest {
     expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
-    expectedUri.setParameter("c", "HDPFlumeServer,HostMetrics");
+    expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");
     expectedUri.setParameter("h", "ip-10-39-113-33.ec2.internal");
     expectedUri.setParameter("m", metricsList);
     expectedUri.setParameter("s", "10");
@@ -710,7 +710,7 @@ public class GangliaPropertyProviderTest {
     expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
-    expectedUri.setParameter("c", "HDPFlumeServer,HostMetrics");
+    expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");
     expectedUri.setParameter("h", "ip-10-39-113-33.ec2.internal");
     expectedUri.setParameter("m", metricsList);
     expectedUri.setParameter("s", "10");
@@ -769,7 +769,7 @@ public class GangliaPropertyProviderTest {
     expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
-    expectedUri.setParameter("c", "HDPFlumeServer,HostMetrics");
+    expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");
     expectedUri.setParameter("h", "ip-10-39-113-33.ec2.internal");
     expectedUri.setParameter("m", metricsList);
     expectedUri.setParameter("s", "10");

+ 79 - 2
ambari-server/src/test/python/stacks/1.3.2/GANGLIA/test_ganglia_monitor.py

@@ -188,7 +188,35 @@ class TestGangliaMonitor(RMFTestCase):
         group = 'root',
         mode = 0755,
     )
-    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HostMetrics -o root -g hadoop',
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -o root -g hadoop',
+        path = ['/usr/libexec/hdp/ganglia',
+           '/usr/sbin',
+           '/sbin:/usr/local/bin',
+           '/bin',
+           '/usr/bin'],
+    )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHistoryServer -o root -g hadoop',
+        path = ['/usr/libexec/hdp/ganglia',
+           '/usr/sbin',
+           '/sbin:/usr/local/bin',
+           '/bin',
+           '/usr/bin'],
+    )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPDataNode -o root -g hadoop',
+        path = ['/usr/libexec/hdp/ganglia',
+           '/usr/sbin',
+           '/sbin:/usr/local/bin',
+           '/bin',
+           '/usr/bin'],
+    )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPTaskTracker -o root -g hadoop',
+        path = ['/usr/libexec/hdp/ganglia',
+           '/usr/sbin',
+           '/sbin:/usr/local/bin',
+           '/bin',
+           '/usr/bin'],
+    )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseRegionServer -o root -g hadoop',
         path = ['/usr/libexec/hdp/ganglia',
            '/usr/sbin',
            '/sbin:/usr/local/bin',
@@ -214,7 +242,56 @@ class TestGangliaMonitor(RMFTestCase):
 
 
   def assert_gmond_master_conf_generated(self):
-    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HostMetrics -m -o root -g hadoop',
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m -o root -g hadoop',
+        path = ['/usr/libexec/hdp/ganglia',
+           '/usr/sbin',
+           '/sbin:/usr/local/bin',
+           '/bin',
+           '/usr/bin'],
+    )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m -o root -g hadoop',
+        path = ['/usr/libexec/hdp/ganglia',
+           '/usr/sbin',
+           '/sbin:/usr/local/bin',
+           '/bin',
+           '/usr/bin'],
+    )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m -o root -g hadoop',
+        path = ['/usr/libexec/hdp/ganglia',
+           '/usr/sbin',
+           '/sbin:/usr/local/bin',
+           '/bin',
+           '/usr/bin'],
+    )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHistoryServer -m -o root -g hadoop',
+        path = ['/usr/libexec/hdp/ganglia',
+           '/usr/sbin',
+           '/sbin:/usr/local/bin',
+           '/bin',
+           '/usr/bin'],
+    )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPDataNode -m -o root -g hadoop',
+        path = ['/usr/libexec/hdp/ganglia',
+           '/usr/sbin',
+           '/sbin:/usr/local/bin',
+           '/bin',
+           '/usr/bin'],
+    )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPTaskTracker -m -o root -g hadoop',
+        path = ['/usr/libexec/hdp/ganglia',
+           '/usr/sbin',
+           '/sbin:/usr/local/bin',
+           '/bin',
+           '/usr/bin'],
+    )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseRegionServer -m -o root -g hadoop',
+        path = ['/usr/libexec/hdp/ganglia',
+           '/usr/sbin',
+           '/sbin:/usr/local/bin',
+           '/bin',
+           '/usr/bin'],
+    )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m -o root -g hadoop',
         path = ['/usr/libexec/hdp/ganglia',
            '/usr/sbin',
            '/sbin:/usr/local/bin',

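The updated tests pin the exact setupGanglia.sh invocations: on a monitor host, one call per role-specific cluster, and in the master ("server") case the same calls carry an additional -m flag plus an explicit HDPSlaves call. A minimal sketch of the command shape being asserted is below; the helper is hypothetical and only mirrors the strings visible in the assertions.

```python
# Hypothetical helper reproducing the command strings asserted above.
def setup_ganglia_cmd(cluster, master=False, owner="root", group="hadoop"):
    master_flag = "-m " if master else ""
    return ("/usr/libexec/hdp/ganglia/setupGanglia.sh "
            "-c %s %s-o %s -g %s" % (cluster, master_flag, owner, group))

assert setup_ganglia_cmd("HDPSlaves") == \
    "/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -o root -g hadoop"
assert setup_ganglia_cmd("HDPNameNode", master=True) == \
    "/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m -o root -g hadoop"
```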
+ 23 - 2
ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py

@@ -176,7 +176,28 @@ class TestGangliaMonitor(RMFTestCase):
         group = 'root',
         mode = 0755,
     )
-    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HostMetrics -o root -g hadoop',
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -o root -g hadoop',
+        path = ['/usr/libexec/hdp/ganglia',
+           '/usr/sbin',
+           '/sbin:/usr/local/bin',
+           '/bin',
+           '/usr/bin'],
+    )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNimbus -o root -g hadoop',
+        path = ['/usr/libexec/hdp/ganglia',
+           '/usr/sbin',
+           '/sbin:/usr/local/bin',
+           '/bin',
+           '/usr/bin'],
+    )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSupervisor -o root -g hadoop',
+        path = ['/usr/libexec/hdp/ganglia',
+           '/usr/sbin',
+           '/sbin:/usr/local/bin',
+           '/bin',
+           '/usr/bin'],
+    )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -o root -g hadoop',
         path = ['/usr/libexec/hdp/ganglia',
            '/usr/sbin',
            '/sbin:/usr/local/bin',
@@ -265,7 +286,7 @@ class TestGangliaMonitor(RMFTestCase):
            '/bin',
            '/usr/bin'],
     )
-    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HostMetrics -m -o root -g hadoop',
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m -o root -g hadoop',
         path = ['/usr/libexec/hdp/ganglia',
            '/usr/sbin',
            '/sbin:/usr/local/bin',

+ 1 - 1
ambari-web/app/views/main/dashboard.js

@@ -526,7 +526,7 @@ App.MainDashboardView = Em.View.extend(App.UserPref, {
   },
 
   gangliaUrl: function () {
-    return App.router.get('clusterController.gangliaUrl') + "/?r=hour&cs=&ce=&m=&s=by+name&c=HostMetrics&tab=m&vn=";
+    return App.router.get('clusterController.gangliaUrl') + "/?r=hour&cs=&ce=&m=&s=by+name&c=HDPSlaves&tab=m&vn=";
   }.property('App.router.clusterController.gangliaUrl'),
 
   showAlertsPopup: function (event) {