AMBARI-18965 : Investigate and reduce AMS HA dependency on Zookeeper. (avijayan)

Aravindan Vijayan 8 years ago
parent
commit
1c89883e80
13 changed files with 96 additions and 35 deletions
  1. + 32 - 8   ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
  2. + 0 - 1    ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHAHelper.java
  3. + 0 - 1    ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
  4. + 1 - 1    ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
  5. + 8 - 10   ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
  6. + 14 - 0   ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
  7. + 3 - 3    ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java
  8. + 2 - 2    ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java
  9. + 13 - 1   ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
  10. + 2 - 2   ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAControllerTest.java
  11. + 1 - 1   ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
  12. + 12 - 0  ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
  13. + 8 - 5   ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py

+ 32 - 8
ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java

@@ -84,6 +84,10 @@ public abstract class AbstractTimelineMetricsSink {
   public int ZK_CONNECT_TRY_COUNT = 10;
   public int ZK_SLEEP_BETWEEN_RETRY_TIME = 2000;
   public boolean shardExpired = true;
+  private int zookeeperMinBackoffTimeMins = 2;
+  private int zookeeperMaxBackoffTimeMins = 5;
+  private long zookeeperBackoffTimeMillis;
+  private long lastFailedZkRequestTime = 0l;
 
   private SSLSocketFactory sslSocketFactory;
 
@@ -132,6 +136,7 @@ public abstract class AbstractTimelineMetricsSink {
     metricSinkWriteShardStrategy = new MetricSinkWriteShardHostnameHashingStrategy(getHostname());
     collectorHAHelper = new MetricCollectorHAHelper(getZookeeperQuorum(),
       ZK_CONNECT_TRY_COUNT, ZK_SLEEP_BETWEEN_RETRY_TIME);
+    zookeeperBackoffTimeMillis = getZookeeperBackoffTimeMillis();
     isInitializedForHA = true;
   }
 
@@ -337,13 +342,27 @@ public abstract class AbstractTimelineMetricsSink {
     }
 
     // Reach out to all configured collectors before Zookeeper
-    refreshCollectorsFromConfigured();
+    Collection<String> collectorHosts = getConfiguredCollectorHosts();
+    refreshCollectorsFromConfigured(collectorHosts);
 
     // Lookup Zookeeper for live hosts - max 10 seconds wait time
-    if (allKnownLiveCollectors.size() == 0 && getZookeeperQuorum() != null) {
-      //TODO : Bring back Zk fallback after proper curation.
-      LOG.info("No live collectors from configuration. Not requesting zookeeper...");
-      //allKnownLiveCollectors.addAll(collectorHAHelper.findLiveCollectorHostsFromZNode());
+    long currentTime = System.currentTimeMillis();
+    if (allKnownLiveCollectors.size() == 0 && getZookeeperQuorum() != null
+      && (currentTime - lastFailedZkRequestTime) > zookeeperBackoffTimeMillis) {
+
+      LOG.info("No live collectors from configuration. Requesting zookeeper...");
+      allKnownLiveCollectors.addAll(collectorHAHelper.findLiveCollectorHostsFromZNode());
+      boolean noNewCollectorFromZk = true;
+      for (String collectorHostFromZk : allKnownLiveCollectors) {
+        if (!collectorHosts.contains(collectorHostFromZk)) {
+          noNewCollectorFromZk = false;
+          break;
+        }
+      }
+      if (noNewCollectorFromZk) {
+        LOG.info("No new collector was found from Zookeeper. Will not request zookeeper for " + zookeeperBackoffTimeMillis + " millis");
+        lastFailedZkRequestTime = System.currentTimeMillis();
+      }
     }
 
     if (allKnownLiveCollectors.size() != 0) {
@@ -356,7 +375,7 @@ public abstract class AbstractTimelineMetricsSink {
             // OR
             // through Expiry (Refresh needed to pick up dead collectors that might have not become alive).
             if (shardExpired) {
-              refreshCollectorsFromConfigured();
+              refreshCollectorsFromConfigured(getConfiguredCollectorHosts());
             }
             return metricSinkWriteShardStrategy.findCollectorShard(new ArrayList<>(allKnownLiveCollectors));
           }
@@ -376,8 +395,7 @@ public abstract class AbstractTimelineMetricsSink {
     return null;
   }
 
-  private void refreshCollectorsFromConfigured() {
-    Collection<String> collectorHosts = getConfiguredCollectorHosts();
+  private void refreshCollectorsFromConfigured(Collection<String> collectorHosts) {
 
     LOG.debug("Trying to find live collector host from : " + collectorHosts);
     if (collectorHosts != null && !collectorHosts.isEmpty()) {
@@ -497,6 +515,12 @@ public abstract class AbstractTimelineMetricsSink {
     return hosts;
   }
 
+
+  private long getZookeeperBackoffTimeMillis() {
+    return (zookeeperMinBackoffTimeMins +
+      rand.nextInt(zookeeperMaxBackoffTimeMins - zookeeperMinBackoffTimeMins + 1)) * 60*1000l;
+  }
+
   /**
    * Get a pre-formatted URI for the collector
    */
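
Taken together, the sink-side hunks above reintroduce the ZooKeeper fallback but rate-limit it: when none of the configured collector hosts are live, the sink queries the collectors znode, and if ZooKeeper returns no host beyond the configured ones it backs off for a randomized 2 to 5 minutes before asking again. Below is a minimal, self-contained sketch of that gating logic; the field names and the backoff arithmetic mirror the diff, but the wrapper class ZkFallbackGate and its methods are illustrative only and not part of the patch.

    import java.util.Collection;
    import java.util.HashSet;
    import java.util.Random;
    import java.util.Set;

    // Illustrative sketch of the randomized ZooKeeper backoff added in this patch.
    // Only the field names and the arithmetic mirror AbstractTimelineMetricsSink;
    // the wrapper class itself is hypothetical.
    public class ZkFallbackGate {
      private final int zookeeperMinBackoffTimeMins = 2;
      private final int zookeeperMaxBackoffTimeMins = 5;
      private final long zookeeperBackoffTimeMillis;
      private long lastFailedZkRequestTime = 0L;
      private final Random rand = new Random();

      public ZkFallbackGate() {
        // Randomized once at init: between 2 and 5 minutes, i.e. 120,000 to 300,000 ms.
        zookeeperBackoffTimeMillis = (zookeeperMinBackoffTimeMins +
          rand.nextInt(zookeeperMaxBackoffTimeMins - zookeeperMinBackoffTimeMins + 1)) * 60 * 1000L;
      }

      // True when enough time has passed since the last unproductive ZooKeeper lookup.
      public boolean shouldQueryZookeeper() {
        return System.currentTimeMillis() - lastFailedZkRequestTime > zookeeperBackoffTimeMillis;
      }

      // Record an unproductive lookup: ZooKeeper returned no host that was not
      // already in the configured list, so suppress further lookups for the window.
      public void recordLookupResult(Collection<String> configuredHosts,
                                     Collection<String> hostsFromZk) {
        Set<String> newHosts = new HashSet<>(hostsFromZk);
        newHosts.removeAll(configuredHosts);
        if (newHosts.isEmpty()) {
          lastFailedZkRequestTime = System.currentTimeMillis();
        }
      }
    }

With the default bounds this works out to a wait of 120 to 300 seconds between unproductive ZooKeeper lookups, which is the "reduce AMS HA dependency on Zookeeper" part of the change.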

+ 0 - 1
ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHAHelper.java

@@ -23,7 +23,6 @@ import org.apache.curator.CuratorZookeeperClient;
 import org.apache.curator.RetryLoop;
 import org.apache.curator.RetryPolicy;
 import org.apache.curator.retry.BoundedExponentialBackoffRetry;
-import org.apache.curator.retry.RetryUntilElapsed;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.Stat;
 

+ 0 - 1
ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java

@@ -49,7 +49,6 @@ import static org.powermock.api.easymock.PowerMock.verifyAll;
 @PrepareForTest({AbstractTimelineMetricsSink.class, URL.class, HttpURLConnection.class, MetricCollectorHAHelper.class})
 public class MetricCollectorHATest {
 
-  @Ignore
   @Test
   public void findCollectorUsingZKTest() throws Exception {
     InputStream is = createNiceMock(InputStream.class);

+ 1 - 1
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py

@@ -223,7 +223,7 @@ class Configuration:
     return self.get("collector", "failover_strategy", ROUND_ROBIN_FAILOVER_STRATEGY)
     return self.get("collector", "failover_strategy", ROUND_ROBIN_FAILOVER_STRATEGY)
 
 
   def get_failover_strategy_blacklisted_interval_seconds(self):
   def get_failover_strategy_blacklisted_interval_seconds(self):
-    return self.get("collector", "failover_strategy_blacklisted_interval_seconds", 600)
+    return self.get("collector", "failover_strategy_blacklisted_interval_seconds", 300)
 
 
   def get_hostname_script(self):
   def get_hostname_script(self):
     if self.hostname_script:
     if self.hostname_script:

+ 8 - 10
ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java

@@ -107,16 +107,14 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
       // Initialize policies before TTL update
       hBaseAccessor.initPoliciesAndTTL();
       // Start HA service
-      if (configuration.isDistributedOperationModeEnabled()) {
-        // Start the controller
-        haController = new MetricCollectorHAController(configuration);
-        try {
-          haController.initializeHAController();
-        } catch (Exception e) {
-          LOG.error(e);
-          throw new MetricsSystemInitializationException("Unable to " +
-            "initialize HA controller", e);
-        }
+      // Start the controller
+      haController = new MetricCollectorHAController(configuration);
+      try {
+        haController.initializeHAController();
+      } catch (Exception e) {
+        LOG.error(e);
+        throw new MetricsSystemInitializationException("Unable to " +
+          "initialize HA controller", e);
       }
 
       String whitelistFile = metricsConf.get(TIMELINE_METRICS_WHITELIST_FILE, "");

+ 14 - 0
ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java

@@ -320,6 +320,20 @@ public class TimelineMetricConfiguration {
     return hbaseConf.getTrimmed("hbase.zookeeper.quorum");
   }
 
+  public String getClusterZKClientPort() throws MalformedURLException, URISyntaxException {
+    if (!isInitialized) {
+      initialize();
+    }
+    return metricsConf.getTrimmed("cluster.zookeeper.property.clientPort", "2181");
+  }
+
+  public String getClusterZKQuorum() throws MalformedURLException, URISyntaxException {
+    if (!isInitialized) {
+      initialize();
+    }
+    return metricsConf.getTrimmed("cluster.zookeeper.quorum");
+  }
+
   public String getInstanceHostnameFromEnv() throws UnknownHostException {
     String amsInstanceName = System.getProperty("AMS_INSTANCE_NAME");
     if (amsInstanceName == null) {
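
The two getters added here read the cluster ZooKeeper ensemble from ams-site, separating it from the embedded HBase ZooKeeper settings that getZKQuorum()/getZKClientPort() return. A caller such as MetricCollectorHAController needs them combined into a single connect string; the sketch below is a hedged illustration of that composition, where buildConnectString and the example hostnames are hypothetical and not the method the patch actually uses.

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical helper: turn a quorum host list plus a client port into a
    // ZooKeeper connect string such as "zk1:2181,zk2:2181,zk3:2181".
    public final class ZkConnectString {

      private ZkConnectString() {
      }

      public static String buildConnectString(String zkQuorum, String zkClientPort) {
        List<String> hostPorts = new ArrayList<>();
        for (String host : zkQuorum.split(",")) {
          String trimmed = host.trim();
          // Append the port only when the quorum entry does not already carry one.
          hostPorts.add(trimmed.contains(":") ? trimmed : trimmed + ":" + zkClientPort);
        }
        return String.join(",", hostPorts);
      }

      public static void main(String[] args) {
        // With the default client port exposed by getClusterZKClientPort():
        System.out.println(buildConnectString("zk1.example.com,zk2.example.com", "2181"));
        // -> zk1.example.com:2181,zk2.example.com:2181
      }
    }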

+ 3 - 3
ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java

@@ -317,9 +317,9 @@ public abstract class AbstractTimelineAggregator implements TimelineMetricAggreg
 
   protected void downsample(Connection conn, Long startTime, Long endTime) {
 
-    LOG.info("Checking for downsampling requests.");
+    LOG.debug("Checking for downsampling requests.");
     if (CollectionUtils.isEmpty(configuredDownSamplers)) {
-      LOG.info("No downsamplers configured");
+      LOG.debug("No downsamplers configured");
       return;
     }
 
@@ -424,7 +424,7 @@ public abstract class AbstractTimelineAggregator implements TimelineMetricAggreg
 
     PreparedStatement stmt = null;
     ResultSet rs = null;
-    LOG.info("Downsampling query : " + condition.getStatement());
+    LOG.debug("Downsampling query : " + condition.getStatement());
 
     try {
       stmt = PhoenixTransactSQL.prepareGetMetricsSqlStmt(conn, condition);

+ 2 - 2
ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java

@@ -81,8 +81,8 @@ public class MetricCollectorHAController {
     }
 
     try {
-      String zkClientPort = configuration.getZKClientPort();
-      String zkQuorum = configuration.getZKQuorum();
+      String zkClientPort = configuration.getClusterZKClientPort();
+      String zkQuorum = configuration.getClusterZKQuorum();
 
       if (StringUtils.isEmpty(zkClientPort) || StringUtils.isEmpty(zkQuorum)) {
         throw new Exception("Unable to parse zookeeper quorum. clientPort = "

+ 13 - 1
ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java

@@ -25,8 +25,10 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.HBaseTimelineMetricStore;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.MetricCollectorHAController;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.DefaultPhoenixDataSource;
 import org.apache.zookeeper.ClientCnxn;
 import org.easymock.EasyMock;
@@ -71,7 +73,7 @@ import static org.powermock.api.support.membermodification.MemberMatcher.method;
 import static org.powermock.api.support.membermodification.MemberModifier.suppress;
 
 @RunWith(PowerMockRunner.class)
-@PrepareForTest({ PhoenixHBaseAccessor.class, UserGroupInformation.class,
+@PrepareForTest({ PhoenixHBaseAccessor.class, HBaseTimelineMetricStore.class, UserGroupInformation.class,
   ClientCnxn.class, DefaultPhoenixDataSource.class, ConnectionFactory.class,
   TimelineMetricConfiguration.class, ApplicationHistoryServer.class })
 @PowerMockIgnore( {"javax.management.*"})
@@ -179,6 +181,8 @@ public class TestApplicationHistoryServer {
     expect(metricConfiguration.getTimelineMetricsServiceHandlerThreadCount()).andReturn(20).anyTimes();
     expect(metricConfiguration.getWebappAddress()).andReturn("localhost:9990").anyTimes();
     expect(metricConfiguration.getTimelineServiceRpcAddress()).andReturn("localhost:10299").anyTimes();
+    expect(metricConfiguration.getClusterZKQuorum()).andReturn("localhost").anyTimes();
+    expect(metricConfiguration.getClusterZKClientPort()).andReturn("2181").anyTimes();
 
     Connection connection = createNiceMock(Connection.class);
     Statement stmt = createNiceMock(Statement.class);
@@ -197,6 +201,14 @@ public class TestApplicationHistoryServer {
     connection.close();
     expectLastCall();
 
+    MetricCollectorHAController haControllerMock = PowerMock.createMock(MetricCollectorHAController.class);
+    expectNew(MetricCollectorHAController.class, metricConfiguration)
+      .andReturn(haControllerMock);
+
+    haControllerMock.initializeHAController();
+    expectLastCall().once();
+    expect(haControllerMock.isInitialized()).andReturn(false).anyTimes();
+
     org.apache.hadoop.hbase.client.Connection conn = createNiceMock(org.apache.hadoop.hbase.client.Connection.class);
     mockStatic(ConnectionFactory.class);
     expect(ConnectionFactory.createConnection((Configuration) anyObject())).andReturn(conn);
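
Because HBaseTimelineMetricStore now instantiates MetricCollectorHAController unconditionally, the test above has to intercept that constructor: it adds HBaseTimelineMetricStore.class to @PrepareForTest (PowerMock can only redirect a `new` performed by a prepared class) and stubs the construction with expectNew. Below is a reduced sketch of the same pattern, using hypothetical Caller/Dependency classes instead of the real ones.

    import static org.powermock.api.easymock.PowerMock.createMock;
    import static org.powermock.api.easymock.PowerMock.expectNew;
    import static org.powermock.api.easymock.PowerMock.replayAll;
    import static org.powermock.api.easymock.PowerMock.verifyAll;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.powermock.core.classloader.annotations.PrepareForTest;
    import org.powermock.modules.junit4.PowerMockRunner;

    // Reduced illustration of the PowerMock expectNew pattern used in the patch;
    // Caller stands in for HBaseTimelineMetricStore and Dependency for
    // MetricCollectorHAController.
    @RunWith(PowerMockRunner.class)
    @PrepareForTest(ExpectNewPatternTest.Caller.class) // the class doing "new" must be prepared
    public class ExpectNewPatternTest {

      public static class Dependency {
        public void initialize() throws Exception { }
      }

      public static class Caller {
        public void start() throws Exception {
          new Dependency().initialize();
        }
      }

      @Test
      public void constructorIsIntercepted() throws Exception {
        Dependency mock = createMock(Dependency.class);
        expectNew(Dependency.class).andReturn(mock);
        mock.initialize();

        replayAll();
        new Caller().start();
        verifyAll();
      }
    }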

+ 2 - 2
ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAControllerTest.java

@@ -51,8 +51,8 @@ public class MetricCollectorHAControllerTest extends AbstractMiniHBaseClusterTes
     String port = zkUrl.split(":")[3];
     String quorum = zkUrl.split(":")[2];
 
-    expect(configuration.getZKClientPort()).andReturn(port);
-    expect(configuration.getZKQuorum()).andReturn(quorum);
+    expect(configuration.getClusterZKClientPort()).andReturn(port);
+    expect(configuration.getClusterZKQuorum()).andReturn(quorum);
 
     replay(configuration);
   }

+ 1 - 1
ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml

@@ -82,7 +82,7 @@
   </property>
   <property>
     <name>failover_strategy_blacklisted_interval</name>
-    <value>600</value>
+    <value>300</value>
     <description>
       Metrics collector host will be blacklisted for specified number of seconds if metric monitor failed to connect to it.
     </description>

+ 12 - 0
ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml

@@ -716,4 +716,16 @@
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>cluster.zookeeper.quorum</name>
+    <value>{{cluster_zookeeper_quorum_hosts}}</value>
+    <description>Comma separated list of servers in the cluster ZooKeeper Quorum.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>cluster.zookeeper.property.clientPort</name>
+    <value>{{cluster_zookeeper_clientPort}}</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

+ 8 - 5
ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py

@@ -232,15 +232,18 @@ else:
 max_open_files_limit = default("/configurations/ams-hbase-env/max_open_files_limit", "32768")
 hostname = config["hostname"]
 
+cluster_zookeeper_quorum_hosts = ",".join(config['clusterHostInfo']['zookeeper_hosts'])
+if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
+  cluster_zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
+else:
+  cluster_zookeeper_clientPort = '2181'
+
 if not is_hbase_distributed:
   zookeeper_quorum_hosts = hostname
   zookeeper_clientPort = '61181'
 else:
-  zookeeper_quorum_hosts = ",".join(config['clusterHostInfo']['zookeeper_hosts'])
-  if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
-    zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
-  else:
-    zookeeper_clientPort = '2181'
+  zookeeper_quorum_hosts = cluster_zookeeper_quorum_hosts
+  zookeeper_clientPort = cluster_zookeeper_clientPort
 
 ams_checkpoint_dir = config['configurations']['ams-site']['timeline.metrics.aggregator.checkpoint.dir']
 _hbase_tmp_dir = config['configurations']['ams-hbase-site']['hbase.tmp.dir']