Browse Source

Revert "Integrate Druid with Ambari (Nishant Bangarwa, Slim Bouguerra via Swapan Shridhar)."

This reverts commit fca146a5d4284a8156ff3bcf468af7388a8283fe.
Swapan Shridhar 8 years ago
parent
commit
4a8544c5d5
31 changed files with 1 addition and 3769 deletions
  1. 0 100
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml
  2. 0 205
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-common.xml
  3. 0 43
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-coordinator.xml
  4. 0 241
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-env.xml
  5. 0 88
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml
  6. 0 84
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-log4j.xml
  7. 0 46
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-logrotate.xml
  8. 0 104
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-middlemanager.xml
  9. 0 52
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-overlord.xml
  10. 0 59
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-router.xml
  11. 0 249
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml
  12. 0 28
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/broker.py
  13. 0 28
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/coordinator.py
  14. 0 259
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py
  15. 0 85
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid_node.py
  16. 0 28
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/historical.py
  17. 0 28
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/middlemanager.py
  18. 0 28
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/overlord.py
  19. 0 129
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/params.py
  20. 0 28
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/router.py
  21. 0 44
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/service_check.py
  22. 0 24
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/status_params.py
  23. 0 37
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/quicklinks/quicklinks.json
  24. 0 120
      ambari-server/src/main/resources/common-services/DRUID/0.9.2/themes/theme.json
  25. 0 19
      ambari-server/src/main/resources/stacks/HDP/2.6/role_command_order.json
  26. 0 78
      ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json
  27. 0 27
      ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/metainfo.xml
  28. 1 186
      ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
  29. 0 647
      ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py
  30. 0 575
      ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
  31. 0 100
      ambari-server/src/test/python/stacks/2.6/configs/default.json

+ 0 - 100
ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml

@@ -1,100 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/broker</value>
-    <description>The druid.service name of broker node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8082</value>
-    <description>The port on which the broker will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.broker.http.numConnections</name>
-    <value>20</value>
-    <description>Size of connection pool for the Broker to connect to historical and real-time nodes. If there are more
-      queries than this number that all need to speak to the same node, then they will queue up.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.server.http.numThreads</name>
-    <value>50</value>
-    <description>Number of threads for HTTP requests.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.buffer.sizeBytes</name>
-    <value>1073741824</value>
-    <value-attributes>
-      <type>long</type>
-      <minimum>0</minimum>
-      <unit>Bytes</unit>
-    </value-attributes>
-    <description>This specifies a buffer size for the storage of intermediate results. The computation engine in both
-      the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate
-      computations
-      off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can
-      require more passes depending on the query that is being executed.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.numThreads</name>
-    <value>2</value>
-    <description>The number of processing threads to have available for parallel processing of segments.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.broker.cache.useCache</name>
-    <value>true</value>
-    <description>Enable the cache on the broker.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.broker.cache.populateCache</name>
-    <value>true</value>
-    <description>Populate the cache on the broker.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.cache.type</name>
-    <value>local</value>
-    <description>The type of cache to use for queries.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.cache.sizeInBytes</name>
-    <value>10000000</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <unit>Bytes</unit>
-    </value-attributes>
-    <description>Maximum cache size in bytes. Zero disables caching.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

+ 0 - 205
ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-common.xml

@@ -1,205 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.extensions.pullList</name>
-    <value>[]</value>
-    <description>A comma-separated list of one or more druid extensions to download from maven.</description>
-    <depends-on>
-      <property>    
-        <type>druid-common</type>  
-        <name>druid.metadata.storage.type</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.extensions.loadList</name>
-    <value>["druid-datasketches"]
-    </value>
-    <depends-on>
-      <property>    
-        <type>druid-common</type>  
-        <name>druid.metadata.storage.type</name>
-      </property>
-    </depends-on>
-    <description>A comma-separated list of one or more druid extensions to load.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.zk.service.host</name>
-    <value>localhost:2181</value>
-    <description>
-      zookeeper connection string.
-    </description>
-  </property>
-  <property>
-    <name>druid.zk.paths.base</name>
-    <value>/druid</value>
-    <description>
-      Base Zookeeper path
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.discovery.curator.path</name>
-    <value>/druid/discovery</value>
-    <description>
-      Services announce themselves under this ZooKeeper path.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.storage.type</name>
-    <value></value>
-    <description>
-      Choices: local, noop, s3, hdfs, c*. The type of deep storage to use.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.storage.storageDirectory</name>
-    <value></value>
-    <description>
-      directory to use as deep storage.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.metadata.storage.connector.password</name>
-    <value></value>
-    <property-type>PASSWORD</property-type>
-    <display-name>Metadata storage password</display-name>
-    <description>Password for the metadata storage data base.</description>
-    <value-attributes>
-      <type>password</type>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.metadata.storage.connector.user</name>
-    <value>druid</value>
-    <display-name>Metadata storage user</display-name>
-    <description>Metadata storage user</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.metadata.storage.connector.port</name>
-    <value>1527</value>
-    <display-name>Metadata storage port</display-name>
-    <description>Metadata storage port</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-      <property>    
-        <type>druid-common</type>  
-        <name>druid.metadata.storage.type</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>database_name</name>
-    <value>druid</value>
-    <display-name>Metadata storage database name</display-name>
-    <description>Metadata storage database name</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>metastore_hostname</name>
-    <value>localhost</value>
-    <display-name>Metadata storage hostname name</display-name>
-    <description>Metadata storage hostname name</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property require-input="true">
-    <name>druid.metadata.storage.type</name>
-    <display-name>Metadata storage type</display-name>
-    <value>derby</value>
-    <value-attributes>
-      <overridable>false</overridable>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>mysql</value>
-          <label>MYSQL</label>
-        </entry>
-        <entry>
-          <value>derby</value>
-          <label>DERBY</label>
-        </entry>
-        <entry>
-          <value>postgres</value>
-          <label>POSTGRES</label>
-        </entry>
-      </entries>
-    </value-attributes>
-    <description>Type of the metadata storage. Note that derby will work only if all the druid node are located
-      within the same node. Use mysql or postgres for distributed mode.
-      mysql installed by ambari is only for development and not suitable for production use cases due to it being not HA
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property require-input="true">
-    <name>druid.metadata.storage.connector.connectURI</name>
-    <value>jdbc:derby://localhost:1527/druid;create=true</value>
-    <display-name>Metadata storage connector url</display-name>
-    <description>Metadata storage connector url</description>
-    <on-ambari-upgrade add="false"/>
-    <depends-on>
-      <property>    
-        <type>druid-common</type>  
-        <name>database_name</name>
-      </property>
-      <property>    
-        <type>druid-common</type>  
-        <name>metastore_hostname</name>
-      </property>
-      <property>    
-        <type>druid-common</type>  
-        <name>druid.metadata.storage.type</name>
-      </property>
-      <property>    
-        <type>druid-common</type>  
-        <name>druid.metadata.storage.connector.port</name>
-      </property>
-    </depends-on>
-  </property>
-  <property>
-    <name>druid.hadoop.security.kerberos.principal</name>
-    <display-name>kerberos principal</display-name>
-    <description>Kerberos principal e.g druid@EXAMPLE.COM</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.hadoop.security.kerberos.keytab</name>
-    <display-name>Kerberos keytab location</display-name>
-    <description>Kerberos keytab location</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-</configuration>

+ 0 - 43
ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-coordinator.xml

@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/coordinator</value>
-    <description>The druid.service name of coordinator node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8081</value>
-    <description>The port on which the coordinator will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.coordinator.merge.on</name>
-    <value>false</value>
-    <description>Boolean flag for whether or not the coordinator should try and merge small segments into a more optimal
-      segment size.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

+ 0 - 241
ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-env.xml

@@ -1,241 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <!--Heap Settings -->
-  <property>
-    <name>druid.broker.jvm.heap.memory</name>
-    <value>2048</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.coordinator.jvm.heap.memory</name>
-    <value>512</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.middlemanager.jvm.heap.memory</name>
-    <value>256</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.historical.jvm.heap.memory</name>
-    <value>2048</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.overlord.jvm.heap.memory</name>
-    <value>512</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.router.jvm.heap.memory</name>
-    <value>512</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- DirectMemorySettings -->
-  <property>
-    <name>druid.broker.jvm.direct.memory</name>
-    <value>1048576</value>
-    <depends-on>
-      <property>    
-        <type>druid-broker</type>  
-        <name>druid.processing.buffer.sizeBytes</name>
-      </property>
-      <property>    
-        <type>druid-broker</type>  
-        <name>druid.processing.numThreads</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.coordinator.jvm.direct.memory</name>
-    <value>1048576</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.middlemanager.jvm.direct.memory</name>
-    <value>1048576</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.historical.jvm.direct.memory</name>
-    <value>1048576</value>
-    <depends-on>
-      <property>
-        <type>druid-historical</type>  
-        <name>druid.processing.buffer.sizeBytes</name>
-      </property>
-      <property>    
-        <type>druid-historical</type>  
-        <name>druid.processing.numThreads</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.overlord.jvm.direct.memory</name>
-    <value>1048576</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.router.jvm.direct.memory</name>
-    <value>1048576</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- JavaOpts Tune GC related configs here-->
-  <property>
-    <name>druid.broker.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.coordinator.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.middlemanager.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.historical.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.overlord.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.router.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid_user</name>
-    <display-name>Druid User</display-name>
-    <value>druid</value>
-    <property-type>USER</property-type>
-    <description></description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid_log_dir</name>
-    <value>/var/log/druid</value>
-    <description></description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid_pid_dir</name>
-    <value>/var/run/druid</value>
-    <display-name>Druid PID dir</display-name>
-    <description></description>
-    <value-attributes>
-      <type>directory</type>
-      <editable-only-at-install>true</editable-only-at-install>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- druid-env.sh -->
-  <property>
-    <name>content</name>
-    <display-name>druid-env template</display-name>
-    <description>This is simple template for druid-env.sh file</description>
-    <value>
-      #!/bin/bash
-
-      # Set DRUID specific environment variables here.
-
-      # The java implementation to use.
-      export JAVA_HOME={{java8_home}}
-      export PATH=$PATH:$JAVA_HOME/bin
-      export DRUID_PID_DIR={{druid_pid_dir}}
-      export DRUID_LOG_DIR={{druid_log_dir}}
-      export DRUID_CONF_DIR={{druid_conf_dir}}
-      export DRUID_LIB_DIR={{druid_home}}/lib
-
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

+ 0 - 88
ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml

@@ -1,88 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/historical</value>
-    <description>The druid.service name of historical node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8083</value>
-    <description>The port on which the historical nodes will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.server.http.numThreads</name>
-    <value>50</value>
-    <description>Number of threads for HTTP requests.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.buffer.sizeBytes</name>
-    <value>1073741824</value>
-    <value-attributes>
-      <type>long</type>
-      <minimum>0</minimum>
-      <unit>Bytes</unit>
-    </value-attributes>
-    <description>This specifies a buffer size for the storage of intermediate results. The computation engine in both
-      the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate
-      computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller
-      values can require more passes depending on the query that is being executed.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.numThreads</name>
-    <value>10</value>
-    <description>The number of processing threads to have available for parallel processing of segments.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.segmentCache.locations</name>
-    <value>[{"path":"/apps/druid/segmentCache","maxSize"\:300000000000}]</value>
-    <description>Segments assigned to a Historical node are first stored on the local file system (in a disk cache) and
-      then served by the Historical node. These locations define where that local cache resides.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.server.maxSize</name>
-    <value>300000000000</value>
-    <description>The maximum number of bytes-worth of segments that the node wants assigned to it. This is not a limit
-      that Historical nodes actually enforces, just a value published to the Coordinator node so it can plan
-      accordingly.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.segmentCache.infoDir</name>
-    <value>/apps/druid/segmentCache</value>
-    <description>Historical nodes keep track of the segments they are serving so that when the process is restarted they
-      can reload the same segments without waiting for the Coordinator to reassign. This path defines where this
-      metadata is kept. Directory will be created if needed.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

+ 0 - 84
ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-log4j.xml

@@ -1,84 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>druid_log_level</name>
-    <value>info</value>
-    <description>Log level for io.druid logging</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>root_log_level</name>
-    <value>WARN</value>
-    <description>Log level for root logging</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>metamx_log_level</name>
-    <value>info</value>
-    <description>Log level for com.metamx logging</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>druid-log4j template</display-name>
-    <description>Custom log4j.properties</description>
-    <value><![CDATA[<?xml version="1.0" encoding="UTF-8" ?>
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~ or more contributor license agreements.  See the NOTICE file
-  ~ distributed with this work for additional information
-  ~ regarding copyright ownership.  The ASF licenses this file
-  ~ to you under the Apache License, Version 2.0 (the
-  ~ "License"); you may not use this file except in compliance
-  ~ with the License.  You may obtain a copy of the License at
-  ~
-  ~     http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing, software
-  ~ distributed under the License is distributed on an "AS IS" BASIS,
-  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~ See the License for the specific language governing permissions and
-  ~ limitations under the License.
-  -->
-    <Configuration>
-        <Appenders>
-            <Console name="Console" target="SYSTEM_OUT">
-                <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
-            </Console>
-        </Appenders>
-        <Loggers>
-            <Logger name="com.metamx" level="{{metamx_log_level}}"/>
-            <Logger name="io.druid" level="{{druid_log_level}}"/>
-            <Root level="{{root_log_level}}">
-                <AppenderRef ref="Console"/>
-            </Root>
-        </Loggers>
-    </Configuration>
-      ]]></value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

+ 0 - 46
ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-logrotate.xml

@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>content</name>
-    <display-name>druid logrotate template</display-name>
-    <description>Custom logrotate file</description>
-    <value><![CDATA[
-    {{druid_log_dir}}/*.log {
-        copytruncate
-        rotate 7
-        daily
-        nocompress
-        missingok
-        notifempty
-        create 660 druid users
-        dateext
-        dateformat -%Y-%m-%d-%s
-        }
-      ]]></value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

+ 0 - 104
ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-middlemanager.xml

@@ -1,104 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/middlemanager</value>
-    <description>The druid.service name of middlemanager node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8091</value>
-    <description>The port on which the middlemanager nodes will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.runner.startPort</name>
-    <value>8100</value>
-    <description>The port that peons begin running on.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.worker.capacity</name>
-    <value>3</value>
-    <description>
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.runner.javaOpts</name>
-    <value>-server -Xmx2g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager -Dhdp.version={{stack_version}} -Dhadoop.mapreduce.job.classloader=true</value>
-    <description>
-      A string of -X Java options to pass to the peon's JVM.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.task.baseTaskDir</name>
-    <value>/tmp/persistent/tasks</value>
-    <description>
-      Base temporary working directory for druid tasks.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.server.http.numThreads</name>
-    <value>50</value>
-    <description>
-      Number of threads for HTTP requests.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.buffer.sizeBytes</name>
-    <value>256000000</value>
-    <value-attributes>
-      <type>long</type>
-      <minimum>0</minimum>
-      <unit>Bytes</unit>
-    </value-attributes>
-    <description>
-      This specifies a buffer size for the storage of intermediate results. The computation engine in both the
-      Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate computations
-      off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can
-      require more passes depending on the query that is being executed.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.numThreads</name>
-    <value>2</value>
-    <description>
-      The number of processing threads to have available for parallel processing of segments.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.task.hadoopWorkingPath</name>
-    <value>/tmp/druid-indexing</value>
-    <description>
-      Temporary working directory for Hadoop tasks
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

+ 0 - 52
ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-overlord.xml

@@ -1,52 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/overlord</value>
-    <description>The druid.service name of overlord node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8090</value>
-    <description>The port on which the overlord will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.runner.type</name>
-    <value>remote</value>
-    <description>Choices "local" or "remote". Indicates whether tasks should be run locally or in a distributed
-      environment.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.storage.type</name>
-    <value>metadata</value>
-    <description>Choices are "local" or "metadata". Indicates whether incoming tasks should be stored locally (in heap)
-      or in metadata storage. Storing incoming tasks in metadata storage allows for tasks to be resumed if the overlord
-      should fail.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

+ 0 - 59
ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-router.xml

@@ -1,59 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/router</value>
-    <description>The druid.service name of router node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8888</value>
-    <description>The port on which the broker will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.router.http.numConnections</name>
-    <value>20</value>
-    <description>
-      Size of connection pool for the router to connect to historical and real-time nodes. If there are more
-      queries than this number that all need to speak to the same node, then they will queue up.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.server.http.numThreads</name>
-    <value>50</value>
-    <description>Number of threads for HTTP requests.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.router.tierToBrokerMap</name>
-    <value>{}</value>
-    <description>
-      Used to route queries for a certain tier of data to their appropriate broker. An ordered JSON map of
-      tiers to broker names. The priority of brokers is based on the ordering.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

+ 0 - 249
ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml

@@ -1,249 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>DRUID</name>
-      <displayName>Druid</displayName>
-      <comment>A fast column-oriented distributed data store.</comment>
-      <version>0.9.2</version>
-      <components>
-        <component>
-          <name>DRUID_COORDINATOR</name>
-          <displayName>Druid Coordinator</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/coordinator.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-        <component>
-          <name>DRUID_OVERLORD</name>
-          <displayName>Druid Overlord</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/overlord.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-        <component>
-          <name>DRUID_HISTORICAL</name>
-          <displayName>Druid Historical</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/historical.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-        <component>
-          <name>DRUID_BROKER</name>
-          <displayName>Druid Broker</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/broker.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-        <component>
-          <name>DRUID_MIDDLEMANAGER</name>
-          <displayName>Druid MiddleManager</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/middlemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-        <component>
-          <name>DRUID_ROUTER</name>
-          <displayName>Druid Router</displayName>
-          <category>SLAVE</category>
-          <cardinality>0+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/router.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <themes>
-        <theme>
-          <fileName>theme.json</fileName>
-          <default>true</default>
-        </theme>
-      </themes>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
-          <packages>
-            <package>
-              <name>druid_${stack_version}</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
-          <packages>
-            <package>
-              <name>druid-${stack_version}</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>amazon2015,redhat6,suse11,suse12</osFamily>
-          <packages>
-            <package>
-              <name>mysql</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>amazon2015,redhat6,debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
-          <packages>
-            <package>
-              <name>mysql-server</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat7</osFamily>
-          <packages>
-            <package>
-              <name>mysql-community-release</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-            <package>
-              <name>mysql-community-server</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse11,suse12</osFamily>
-          <packages>
-            <package>
-              <name>mysql-client</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-      </requiredServices>
-      <configuration-dependencies>
-        <config-type>druid-common</config-type>
-        <config-type>druid-env</config-type>
-        <config-type>druid-coordinator</config-type>
-        <config-type>druid-overlord</config-type>
-        <config-type>druid-historical</config-type>
-        <config-type>druid-broker</config-type>
-        <config-type>druid-middlemanager</config-type>
-        <config-type>druid-log4j</config-type>
-        <config-type>druid-logrotate</config-type>
-        <config-type>druid-router</config-type>
-        <config-type>zoo.cfg</config-type>
-        <config-type>core-site</config-type>
-        <config-type>mapred-site</config-type>
-        <config-type>yarn-site</config-type>
-        <config-type>hdfs-site</config-type>
-      </configuration-dependencies>
-
-      <quickLinksConfigurations>
-        <quickLinksConfiguration>
-          <fileName>quicklinks.json</fileName>
-          <default>true</default>
-        </quickLinksConfiguration>
-      </quickLinksConfigurations>
-
-    </service>
-  </services>
-</metainfo>

+ 0 - 28
ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/broker.py

@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidBroker(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="broker")
-
-
-if __name__ == "__main__":
-  DruidBroker().execute()

+ 0 - 28
ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/coordinator.py

@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidCoordinator(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="coordinator")
-
-
-if __name__ == "__main__":
-  DruidCoordinator().execute()

+ 0 - 259
ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py

@@ -1,259 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.libraries.resources.properties_file import PropertiesFile
-from resource_management.core.resources.system import Directory, Execute, File
-from resource_management.core.source import InlineTemplate
-from resource_management.libraries.functions import format
-from resource_management.libraries.resources import XmlConfig
-from resource_management.libraries.functions.show_logs import show_logs
-from resource_management.core.logger import Logger
-
-
-def druid(upgrade_type=None, nodeType=None):
-  import params
-  ensure_base_directories()
-
-  # Environment Variables
-  File(format("{params.druid_conf_dir}/druid-env.sh"),
-       owner=params.druid_user,
-       content=InlineTemplate(params.druid_env_sh_template)
-       )
-
-  # common config
-  druid_common_config = mutable_config_dict(params.config['configurations']['druid-common'])
-  # User cannot override below configs
-  druid_common_config['druid.host'] = params.hostname
-  druid_common_config['druid.extensions.directory'] = params.druid_extensions_dir
-  druid_common_config['druid.extensions.hadoopDependenciesDir'] = params.druid_hadoop_dependencies_dir
-  druid_common_config['druid.selectors.indexing.serviceName'] = params.config['configurations']['druid-overlord'][
-    'druid.service']
-  druid_common_config['druid.selectors.coordinator.serviceName'] = \
-    params.config['configurations']['druid-coordinator']['druid.service']
-
-  # delete the password and user if empty otherwiswe derby will fail.
-  if 'derby' == druid_common_config['druid.metadata.storage.type']:
-    del druid_common_config['druid.metadata.storage.connector.user']
-    del druid_common_config['druid.metadata.storage.connector.password']
-
-  druid_env_config = mutable_config_dict(params.config['configurations']['druid-env'])
-
-  PropertiesFile("common.runtime.properties",
-                 dir=params.druid_common_conf_dir,
-                 properties=druid_common_config,
-                 owner=params.druid_user,
-                 group=params.user_group,
-                 )
-  Logger.info("Created common.runtime.properties")
-
-  File(format("{params.druid_common_conf_dir}/druid-log4j.xml"),
-       mode=0644,
-       owner=params.druid_user,
-       group=params.user_group,
-       content=InlineTemplate(params.log4j_props)
-       )
-  Logger.info("Created log4j file")
-
-  File("/etc/logrotate.d/druid",
-       mode=0644,
-       owner='root',
-       group='root',
-       content=InlineTemplate(params.logrotate_props)
-       )
-
-  Logger.info("Created log rotate file")
-
-  # Write Hadoop Configs if configured
-  if 'core-site' in params.config['configurations']:
-    XmlConfig("core-site.xml",
-              conf_dir=params.druid_common_conf_dir,
-              configurations=params.config['configurations']['core-site'],
-              configuration_attributes=params.config['configuration_attributes']['core-site'],
-              owner=params.druid_user,
-              group=params.user_group
-              )
-
-  if 'mapred-site' in params.config['configurations']:
-    XmlConfig("mapred-site.xml",
-              conf_dir=params.druid_common_conf_dir,
-              configurations=params.config['configurations']['mapred-site'],
-              configuration_attributes=params.config['configuration_attributes']['mapred-site'],
-              owner=params.druid_user,
-              group=params.user_group
-              )
-
-  if 'yarn-site' in params.config['configurations']:
-    XmlConfig("yarn-site.xml",
-              conf_dir=params.druid_common_conf_dir,
-              configurations=params.config['configurations']['yarn-site'],
-              configuration_attributes=params.config['configuration_attributes']['yarn-site'],
-              owner=params.druid_user,
-              group=params.user_group
-              )
-
-  if 'hdfs-site' in params.config['configurations']:
-    XmlConfig("hdfs-site.xml",
-              conf_dir=params.druid_common_conf_dir,
-              configurations=params.config['configurations']['hdfs-site'],
-              configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-              owner=params.druid_user,
-              group=params.user_group
-              )
-
-  # node specific configs
-  for node_type in ['coordinator', 'overlord', 'historical', 'broker', 'middleManager', 'router']:
-    node_config_dir = format('{params.druid_conf_dir}/{node_type}')
-    node_type_lowercase = node_type.lower()
-
-    # Write runtime.properties file
-    node_config = mutable_config_dict(params.config['configurations'][format('druid-{node_type_lowercase}')])
-    PropertiesFile("runtime.properties",
-                   dir=node_config_dir,
-                   properties=node_config,
-                   owner=params.druid_user,
-                   group=params.user_group,
-                   )
-    Logger.info(format("Created druid-{node_type_lowercase} runtime.properties"))
-
-    # Write jvm configs
-    File(format('{node_config_dir}/jvm.config'),
-         owner=params.druid_user,
-         group=params.user_group,
-         content=InlineTemplate(
-           "-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
-           node_heap_memory=druid_env_config[format('druid.{node_type_lowercase}.jvm.heap.memory')],
-           log4j_config_file=format("{params.druid_common_conf_dir}/druid-log4j.xml"),
-           node_direct_memory=druid_env_config[
-             format('druid.{node_type_lowercase}.jvm.direct.memory')],
-           node_jvm_opts=druid_env_config[format('druid.{node_type_lowercase}.jvm.opts')])
-         )
-    Logger.info(format("Created druid-{node_type_lowercase} jvm.config"))
-
-  # All druid nodes have dependency on hdfs_client
-  ensure_hadoop_directories()
-  # Pull all required dependencies
-  pulldeps()
-
-
-def mutable_config_dict(config):
-  rv = {}
-  for key, value in config.iteritems():
-    rv[key] = value
-  return rv
-
-
-def ensure_hadoop_directories():
-  import params
-  if 'hdfs-site' not in params.config['configurations']:
-    # HDFS Not Installed nothing to do.
-    Logger.info("Skipping HDFS directory creation as HDFS not installed")
-    return
-
-  druid_common_config = params.config['configurations']['druid-common']
-  # final overlord config contains both common and overlord config
-  druid_middlemanager_config = params.config['configurations']['druid-middlemanager']
-
-  # If user is using HDFS as deep storage create HDFS Directory for storing segments
-  deep_storage = druid_common_config["druid.storage.type"]
-  storage_dir = druid_common_config["druid.storage.storageDirectory"]
-
-  if deep_storage == 'hdfs':
-    # create the home dir for druid
-    params.HdfsResource(format("/user/{params.druid_user}"),
-                        type="directory",
-                        action="create_on_execute",
-                        owner=params.druid_user,
-                        recursive_chown=True,
-                        recursive_chmod=True
-                        )
-
-    # create the segment storage dir
-    create_hadoop_directory(storage_dir)
-
-  # Create HadoopIndexTask hadoopWorkingPath
-  hadoop_working_path = druid_middlemanager_config['druid.indexer.task.hadoopWorkingPath']
-  if hadoop_working_path is not None:
-    create_hadoop_directory(hadoop_working_path)
-
-  # If HDFS is used for storing logs, create Index Task log directory
-  indexer_logs_type = druid_common_config['druid.indexer.logs.type']
-  indexer_logs_directory = druid_common_config['druid.indexer.logs.directory']
-  if indexer_logs_type == 'hdfs' and indexer_logs_directory is not None:
-    create_hadoop_directory(indexer_logs_directory)
-
-
-def create_hadoop_directory(hadoop_dir):
-  import params
-  params.HdfsResource(hadoop_dir,
-                      type="directory",
-                      action="create_on_execute",
-                      owner=params.druid_user,
-                      mode=0755
-                      )
-  Logger.info(format("Created Hadoop Directory [{hadoop_dir}]"))
-
-
-def ensure_base_directories():
-  import params
-  Directory(
-    [params.druid_log_dir, params.druid_pid_dir, params.druid_common_conf_dir, params.druid_coordinator_conf_dir,
-     params.druid_broker_conf_dir, params.druid_middlemanager_conf_dir, params.druid_historical_conf_dir,
-     params.druid_overlord_conf_dir, params.druid_router_conf_dir, params.druid_segment_infoDir],
-    mode=0755,
-    cd_access='a',
-    owner=params.druid_user,
-    group=params.user_group,
-    create_parents=True,
-    recursive_ownership=True,
-  )
-
-
-def get_daemon_cmd(params=None, node_type=None, command=None):
-  return format('source {params.druid_conf_dir}/druid-env.sh ; {params.druid_home}/bin/node.sh {node_type} {command}')
-
-
-def getPid(params=None, nodeType=None):
-  return format('{params.druid_pid_dir}/{nodeType}.pid')
-
-
-def pulldeps():
-  import params
-  extensions_list = eval(params.druid_extensions)
-  extensions_string = '{0}'.format("-c ".join(extensions_list))
-  if len(extensions_list) > 0:
-    try:
-      # Make sure druid user has permissions to write dependencies
-      Directory(
-        [params.druid_extensions_dir, params.druid_hadoop_dependencies_dir],
-        mode=0755,
-        cd_access='a',
-        owner=params.druid_user,
-        group=params.user_group,
-        create_parents=True,
-        recursive_ownership=True,
-      )
-      Execute(format(
-        "source {params.druid_conf_dir}/druid-env.sh ; java -classpath '{params.druid_home}/lib/*' -Ddruid.extensions.loadList=[] "
-        "-Ddruid.extensions.directory={params.druid_extensions_dir} -Ddruid.extensions.hadoopDependenciesDir={params.druid_hadoop_dependencies_dir} "
-        "io.druid.cli.Main tools pull-deps -c {extensions_string} --no-default-hadoop"),
-        user=params.druid_user
-      )
-      Logger.info(format("Pull Dependencies Complete"))
-    except:
-      show_logs(params.druid_log_dir, params.druid_user)
-      raise

+ 0 - 85
ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid_node.py

@@ -1,85 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management import Script
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.check_process_status import check_process_status
-
-from resource_management.libraries.functions.show_logs import show_logs
-from druid import druid, get_daemon_cmd, getPid
-
-
-class DruidBase(Script):
-  def __init__(self, nodeType=None):
-    self.nodeType = nodeType
-
-  def get_component_name(self):
-    node_type_lower = self.nodeType.lower()
-    return format("druid-{node_type_lower}")
-
-  def install(self, env):
-    self.install_packages(env)
-
-  def configure(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    druid(upgrade_type=upgrade_type, nodeType=self.nodeType)
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    return
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env, upgrade_type=upgrade_type)
-    daemon_cmd = get_daemon_cmd(params, self.nodeType, "start")
-    try:
-      Execute(daemon_cmd,
-              user=params.druid_user
-              )
-    except:
-      show_logs(params.druid_log_dir, params.druid_user)
-      raise
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    daemon_cmd = get_daemon_cmd(params, self.nodeType, "stop")
-    try:
-      Execute(daemon_cmd,
-              user=params.druid_user
-              )
-    except:
-      show_logs(params.druid_log_dir, params.druid_user)
-      raise
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = getPid(status_params, self.nodeType)
-    check_process_status(pid_file)
-
-  def get_log_folder(self):
-    import params
-    return params.druid_log_dir
-
-  def get_user(self):
-    import params
-    return params.druid_user

+ 0 - 28
ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/historical.py

@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidHistorical(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="historical")
-
-
-if __name__ == "__main__":
-  DruidHistorical().execute()

+ 0 - 28
ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/middlemanager.py

@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidMiddleManager(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="middleManager")
-
-
-if __name__ == "__main__":
-  DruidMiddleManager().execute()

+ 0 - 28
ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/overlord.py

@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidOverlord(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="overlord")
-
-
-if __name__ == "__main__":
-  DruidOverlord().execute()

+ 0 - 129
ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/params.py

@@ -1,129 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from ambari_commons import OSCheck
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.functions.default import default
-
-import status_params
-
-# a map of the Ambari role to the component name
-# for use with <stack-root>/current/<component>
-SERVER_ROLE_DIRECTORY_MAP = {
-  'DRUID_BROKER': 'druid-broker',
-  'DRUID_COORDINATOR': 'druid-coordinator',
-  'DRUID_HISTORICAL': 'druid-historical',
-  'DRUID_MIDDLEMANAGER': 'druid-middlemanager',
-  'DRUID_OVERLORD': 'druid-overlord',
-  'DRUID_ROUTER': 'druid-router'
-}
-
-# server configurations
-config = Script.get_config()
-stack_root = Script.get_stack_root()
-tmp_dir = Script.get_tmp_dir()
-
-# stack version
-stack_version = default("/commandParams/version", None)
-
-# default role to coordinator needed for service checks
-component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "DRUID_COORDINATOR")
-
-hostname = config['hostname']
-
-# default druid parameters
-druid_home = format("{stack_root}/current/{component_directory}")
-druid_conf_dir = format("{stack_root}/current/{component_directory}/conf")
-
-druid_common_conf_dir = druid_conf_dir + "/_common"
-druid_coordinator_conf_dir = druid_conf_dir + "/coordinator"
-druid_overlord_conf_dir = druid_conf_dir + "/overlord"
-druid_broker_conf_dir = druid_conf_dir + "/broker"
-druid_historical_conf_dir = druid_conf_dir + "/historical"
-druid_middlemanager_conf_dir = druid_conf_dir + "/middleManager"
-druid_router_conf_dir = druid_conf_dir + "/router"
-druid_extensions_dir = druid_home + "/extensions"
-druid_hadoop_dependencies_dir = druid_home + "/hadoop-dependencies"
-druid_segment_infoDir = config['configurations']['druid-historical']['druid.segmentCache.infoDir']
-druid_user = config['configurations']['druid-env']['druid_user']
-druid_log_dir = config['configurations']['druid-env']['druid_log_dir']
-druid_classpath = config['configurations']['druid-env']['druid_classpath']
-druid_extensions = config['configurations']['druid-common']['druid.extensions.pullList']
-
-# status params
-druid_pid_dir = status_params.druid_pid_dir
-user_group = config['configurations']['cluster-env']['user_group']
-java8_home = config['hostLevelParams']['java_home']
-druid_env_sh_template = config['configurations']['druid-env']['content']
-
-# log4j params
-log4j_props = config['configurations']['druid-log4j']['content']
-druid_log_level = config['configurations']['druid-log4j']['druid_log_level']
-metamx_log_level = config['configurations']['druid-log4j']['metamx_log_level']
-root_log_level = config['configurations']['druid-log4j']['root_log_level']
-logrotate_props = config['configurations']['druid-logrotate']['content']
-
-# Metadata storage
-metadata_storage_user = config['configurations']['druid-common']['druid.metadata.storage.connector.user']
-metadata_storage_password = config['configurations']['druid-common']['druid.metadata.storage.connector.password']
-metadata_storage_db_name = config['configurations']['druid-common']['database_name']
-
-# HDFS
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST",
-                                                                                                             hostname)
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-dfs_type = default("/commandParams/dfs_type", "")
-
-# Kerberose
-druid_principal_name = default('/configurations/druid-common/druid.hadoop.security.kerberos.principal',
-                               'missing_principal')
-druid_user_keytab = default('/configurations/druid-common/druid.hadoop.security.kerberos.keytab', 'missing_keytab')
-
-import functools
-
-# create partial functions with common arguments for every HdfsResource call
-# to create hdfs directory we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  hdfs_resource_ignore_file="/var/lib/ambari-agent/data/.hdfs_resource_ignore",
-  security_enabled=security_enabled,
-  keytab=hdfs_user_keytab,
-  kinit_path_local=kinit_path_local,
-  hadoop_bin_dir=hadoop_bin_dir,
-  hadoop_conf_dir=hadoop_conf_dir,
-  principal_name=hdfs_principal_name,
-  hdfs_site=hdfs_site,
-  default_fs=default_fs,
-  immutable_paths=get_not_managed_resources(),
-  dfs_type=dfs_type
-)

+ 0 - 28
ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/router.py

@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidRouter(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="router")
-
-
-if __name__ == "__main__":
-  DruidRouter().execute()

+ 0 - 44
ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/service_check.py

@@ -1,44 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.format import format
-from resource_management.core.resources.system import Execute
-
-
-class ServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    self.checkComponent(params, "druid_coordinator", "druid-coordinator")
-    self.checkComponent(params, "druid_overlord", "druid-overlord")
-
-  def checkComponent(self, params, component_name, config_name):
-    component_port = params.config['configurations'][format('{config_name}')]['druid.port']
-    for component_host in params.config['clusterHostInfo'][format('{component_name}_hosts')]:
-      Execute(format(
-        "curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {component_host}:{component_port}/status | grep 200"),
-        tries=10,
-        try_sleep=3,
-        logoutput=True)
-
-
-if __name__ == "__main__":
-  ServiceCheck().execute()

+ 0 - 24
ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/status_params.py

@@ -1,24 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.libraries.script.script import Script
-
-config = Script.get_config()
-
-druid_pid_dir = config['configurations']['druid-env']['druid_pid_dir']

+ 0 - 37
ambari-server/src/main/resources/common-services/DRUID/0.9.2/quicklinks/quicklinks.json

@@ -1,37 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol": {
-      "type": "HTTP_ONLY"
-    },
-    "links": [
-      {
-        "name": "coordinator_console",
-        "label": "Druid Coordinator Console",
-        "component_name": "DRUID_COORDINATOR",
-        "requires_user_name": "false",
-        "url": "%@://%@:%@",
-        "port": {
-          "http_property": "druid.port",
-          "http_default_port": "8081",
-          "regex": "^(\\d+)$",
-          "site": "druid-coordinator"
-        }
-      },
-      {
-        "name": "overlord_console",
-        "label": "Druid Overlord Console",
-        "component_name": "DRUID_OVERLORD",
-        "requires_user_name": "false",
-        "url": "%@://%@:%@",
-        "port": {
-          "http_property": "druid.port",
-          "http_default_port": "8090",
-          "regex": "^(\\d+)$",
-          "site": "druid-overlord"
-        }
-      }
-    ]
-  }
-}

+ 0 - 120
ambari-server/src/main/resources/common-services/DRUID/0.9.2/themes/theme.json

@@ -1,120 +0,0 @@
-{
-  "name": "default",
-  "description": "Default theme for Druid service",
-  "configuration": {
-    "layouts": [
-      {
-        "name": "default",
-        "tabs": [
-          {
-            "name": "metadata_storage",
-            "display-name": "META DATA STORAGE CONFIG",
-            "layout": {
-              "tab-columns": "1",
-              "tab-rows": "1",
-              "sections": [
-                {
-                  "name": "section-metadata-storage",
-                  "display-name": "",
-                  "row-index": "0",
-                  "column-index": "0",
-                  "row-span": "2",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-metadata-storage-row1-col1",
-                      "display-name": "META DATA STORAGE",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                }
-              ]
-            }
-          }
-        ]
-      }
-    ],
-    "placement": {
-      "configuration-layout": "default",
-      "configs": [
-        {
-          "config": "druid-common/database_name",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/druid.metadata.storage.type",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/druid.metadata.storage.connector.user",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/druid.metadata.storage.connector.password",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/metastore_hostname",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/druid.metadata.storage.connector.port",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/druid.metadata.storage.connector.connectURI",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        }
-      ]
-    },
-    "widgets": [
-      {
-        "config": "druid-common/database_name",
-        "widget": {
-          "type": "text-field"
-        }
-      },
-      {
-        "config": "druid-common/druid.metadata.storage.type",
-        "widget": {
-          "type": "combo"
-        }
-      },
-      {
-        "config": "druid-common/druid.metadata.storage.connector.user",
-        "widget": {
-          "type": "text-field"
-        }
-      },
-      {
-        "config": "druid-common/druid.metadata.storage.connector.password",
-        "widget": {
-          "type": "password"
-        }
-      },
-      {
-        "config": "druid-common/metastore_hostname",
-        "widget": {
-          "type": "text-field"
-        }
-      },
-      {
-        "config": "druid-common/druid.metadata.storage.connector.port",
-        "widget": {
-          "type": "text-field"
-        }
-      },
-      {
-        "config": "druid-common/druid.metadata.storage.connector.connectURI",
-        "widget": {
-          "type": "text-field"
-        }
-      }
-    ]
-  }
-}

+ 0 - 19
ambari-server/src/main/resources/stacks/HDP/2.6/role_command_order.json

@@ -1,19 +0,0 @@
-{
-  "_comment" : "Record format:",
-  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
-  "general_deps" : {
-    "_comment" : "dependencies for all cases",
-    "DRUID_HISTORICAL-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_OVERLORD-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_MIDDLEMANAGER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_BROKER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_ROUTER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_COORDINATOR-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_OVERLORD-RESTART" : ["DRUID_HISTORICAL-RESTART"],
-    "DRUID_MIDDLEMANAGER-RESTART" : ["DRUID_OVERLORD-RESTART"],
-    "DRUID_BROKER-RESTART" : ["DRUID_MIDDLEMANAGER-RESTART"],
-    "DRUID_ROUTER-RESTART" : ["DRUID_BROKER-RESTART"],
-    "DRUID_COORDINATOR-RESTART" : ["DRUID_ROUTER-RESTART"],
-    "DRUID_SERVICE_CHECK-SERVICE_CHECK" : ["DRUID_HISTORICAL-START", "DRUID_COORDINATOR-START", "DRUID_OVERLORD-START", "DRUID_MIDDLEMANAGER-START", "DRUID_BROKER-START", "DRUID_ROUTER-START"]
-  }
-}

+ 0 - 78
ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json

@@ -1,78 +0,0 @@
-{
-  "services": [
-    {
-      "name": "DRUID",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "druid",
-          "principal": {
-            "value": "${druid-env/druid_user}@${realm}",
-            "type": "user",
-            "configuration": "druid-common/druid.hadoop.security.kerberos.principal",
-            "local_username": "${druid-env/druid_user}"
-          },
-          "keytab": {
-            "file": "${keytab_dir}/druid.headless.keytab",
-            "owner": {
-              "name": "${druid-env/druid_user}",
-              "access": "r"
-            },
-            "group": {
-              "name": "${cluster-env/user_group}",
-              "access": "r"
-            },
-            "configuration": "druid-common/druid.hadoop.security.kerberos.keytab"
-          }
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "components": [
-        {
-          "name": "DRUID_HISTORICAL",
-          "identities": [
-            {
-              "name": "/druid"
-            }
-          ]
-        },
-        {
-          "name": "DRUID_BROKER",
-          "identities": [
-            {
-              "name": "/druid"
-            }
-          ]
-        },
-        {
-          "name": "DRUID_OVERLORD",
-          "identities": [
-            {
-              "name": "/druid"
-            }
-          ]
-        },
-        {
-          "name": "DRUID_COORDINATOR",
-          "identities": [
-            {
-              "name": "/druid"
-            }
-          ]
-        },
-        {
-          "name": "DRUID_MIDDLEMANAGER",
-          "identities": [
-            {
-              "name": "/druid"
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}

+ 0 - 27
ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/metainfo.xml

@@ -1,27 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>DRUID</name>
-      <version>0.9.2.2.6</version>
-      <extends>common-services/DRUID/0.9.2</extends>
-    </service>
-  </services>
-</metainfo>

+ 1 - 186
ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py

@@ -16,191 +16,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 """
-from resource_management.core.logger import Logger
-import json
-from resource_management.libraries.functions import format
-
 
 class HDP26StackAdvisor(HDP25StackAdvisor):
-    def __init__(self):
-        super(HDP26StackAdvisor, self).__init__()
-        Logger.initialize_logger()
-
-    def getServiceConfigurationRecommenderDict(self):
-        parentRecommendConfDict = super(HDP26StackAdvisor, self).getServiceConfigurationRecommenderDict()
-        childRecommendConfDict = {
-            "DRUID": self.recommendDruidConfigurations
-        }
-        parentRecommendConfDict.update(childRecommendConfDict)
-        return parentRecommendConfDict
-
-    def recommendDruidConfigurations(self, configurations, clusterData, services, hosts):
-
-        componentsListList = [service["components"] for service in services["services"]]
-        componentsList = [item["StackServiceComponents"] for sublist in componentsListList for item in sublist]
-        servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-        putCommonProperty = self.putProperty(configurations, "druid-common", services)
-
-        putCommonProperty('druid.zk.service.host', self.getZKHostPortString(services))
-        self.recommendDruidMaxMemoryLimitConfigurations(configurations, clusterData, services, hosts)
-
-        # recommending the metadata storage uri
-        database_name = services['configurations']["druid-common"]["properties"]["database_name"]
-        metastore_hostname = services['configurations']["druid-common"]["properties"]["metastore_hostname"]
-        database_type = services['configurations']["druid-common"]["properties"]["druid.metadata.storage.type"]
-        metadata_storage_port = "1527"
-        mysql_extension_name = "io.druid.extensions:mysql-metadata-storage"
-        mysql_module_name = "mysql-metadata-storage"
-        postgres_module_name = "postgresql-metadata-storage"
-        extensions_load_list = services['configurations']['druid-common']['properties']['druid.extensions.loadList']
-        extensions_pull_list = services['configurations']['druid-common']['properties']['druid.extensions.pullList']
-        putDruidCommonProperty = self.putProperty(configurations, "druid-common", services)
-
-        extensions_pull_list = self.removeFromList(extensions_pull_list, mysql_extension_name)
-        extensions_load_list = self.removeFromList(extensions_load_list, mysql_module_name)
-        extensions_load_list = self.removeFromList(extensions_load_list, postgres_module_name)
-
-        if database_type == 'mysql':
-            metadata_storage_port = "3306"
-            extensions_pull_list = self.addToList(extensions_pull_list, mysql_extension_name)
-            extensions_load_list = self.addToList(extensions_load_list, mysql_module_name)
-
-        if database_type == 'postgres':
-            extensions_load_list = self.addToList(extensions_load_list, postgres_module_name)
-            metadata_storage_port = "5432"
-
-        putDruidCommonProperty('druid.metadata.storage.connector.port', metadata_storage_port)
-        putDruidCommonProperty('druid.metadata.storage.connector.connectURI',
-                               self.getMetadataConnectionString(database_type).format(metastore_hostname, database_name,
-                                                                                      metadata_storage_port))
-        # HDFS is installed
-        if "HDFS" in servicesList and "hdfs-site" in services["configurations"]:
-            # recommend HDFS as default deep storage
-            extensions_load_list = self.addToList(extensions_load_list, "druid-hdfs-storage")
-            putCommonProperty("druid.storage.type", "hdfs")
-            putCommonProperty("druid.storage.storageDirectory", "/user/druid/data")
-            # configure indexer logs configs
-            putCommonProperty("druid.indexer.logs.type", "hdfs")
-            putCommonProperty("druid.indexer.logs.directory", "/user/druid/logs")
-
-        if "KAFKA" in servicesList:
-            extensions_load_list = self.addToList(extensions_load_list, "druid-kafka-indexing-service")
-
-        putCommonProperty('druid.extensions.loadList', extensions_load_list)
-        putCommonProperty('druid.extensions.pullList', extensions_pull_list)
-
-        # JVM Configs go to env properties
-        putEnvProperty = self.putProperty(configurations, "druid-env", services)
-
-        # processing thread pool Config
-        for component in ['DRUID_HISTORICAL', 'DRUID_BROKER']:
-            component_hosts = self.getHostsWithComponent("DRUID", component, services, hosts)
-            nodeType = self.DRUID_COMPONENT_NODE_TYPE_MAP[component]
-            putComponentProperty = self.putProperty(configurations, format("druid-{nodeType}"), services)
-            if (component_hosts is not None and len(component_hosts) > 0):
-                totalAvailableCpu = self.getMinCpu(component_hosts)
-                processingThreads = 1
-                if totalAvailableCpu > 1:
-                    processingThreads = totalAvailableCpu - 1
-                putComponentProperty('druid.processing.numThreads', processingThreads)
-                putComponentProperty('druid.server.http.numThreads', max(10, (totalAvailableCpu * 17) / 16 + 2) + 30)
-
-    def getMetadataConnectionString(self, database_type):
-        driverDict = {
-            'mysql': 'jdbc:mysql://{0}:{2}/{1}?createDatabaseIfNotExist=true',
-            'derby': 'jdbc:derby://{0}:{2}/{1};create=true',
-            'postgres': 'jdbc:postgresql://{0}:{2}/{1}'
-        }
-        return driverDict.get(database_type.lower())
-
-    def addToList(self, json_list, word):
-        desr_list = json.loads(json_list)
-        if word not in desr_list:
-            desr_list.append(word)
-        return json.dumps(desr_list)
-
-    def removeFromList(self, json_list, word):
-        desr_list = json.loads(json_list)
-        if word in desr_list:
-            desr_list.remove(word)
-        return json.dumps(desr_list)
-
-    def recommendDruidMaxMemoryLimitConfigurations(self, configurations, clusterData, services, hosts):
-        putEnvPropertyAttribute = self.putPropertyAttribute(configurations, "druid-env")
-        for component in ["DRUID_HISTORICAL", "DRUID_MIDDLEMANAGER", "DRUID_BROKER", "DRUID_OVERLORD",
-                          "DRUID_COORDINATOR"]:
-            component_hosts = self.getHostsWithComponent("DRUID", component, services, hosts)
-            if component_hosts is not None and len(component_hosts) > 0:
-                totalAvailableMem = self.getMinMemory(component_hosts) / 1024  # In MB
-                nodeType = self.DRUID_COMPONENT_NODE_TYPE_MAP[component]
-                putEnvPropertyAttribute(format('druid.{nodeType}.jvm.heap.memory'), 'maximum',
-                                        max(totalAvailableMem, 1024))
-
-    DRUID_COMPONENT_NODE_TYPE_MAP = {
-        'DRUID_BROKER': 'broker',
-        'DRUID_COORDINATOR': 'coordinator',
-        'DRUID_HISTORICAL': 'historical',
-        'DRUID_MIDDLEMANAGER': 'middlemanager',
-        'DRUID_OVERLORD': 'overlord',
-        'DRUID_ROUTER': 'router'
-    }
-
-    def getMinMemory(self, component_hosts):
-        min_ram_kb = 1073741824  # 1 TB
-        for host in component_hosts:
-            ram_kb = host['Hosts']['total_mem']
-            min_ram_kb = min(min_ram_kb, ram_kb)
-        return min_ram_kb
-
-    def getMinCpu(self, component_hosts):
-        min_cpu = 256
-        for host in component_hosts:
-            cpu_count = host['Hosts']['cpu_count']
-            min_cpu = min(min_cpu, cpu_count)
-        return min_cpu
-
-    def getServiceConfigurationValidators(self):
-        parentValidators = super(HDP26StackAdvisor, self).getServiceConfigurationValidators()
-        childValidators = {
-            "DRUID": {"druid-env": self.validateDruidEnvConfigurations,
-                      "druid-historical": self.validateDruidHistoricalConfigurations,
-                      "druid-broker": self.validateDruidBrokerConfigurations}
-        }
-        self.mergeValidators(parentValidators, childValidators)
-        return parentValidators
-
-    def validateDruidEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-        validationItems = []
-        #  Minimum Direct memory Validation
-        envProperties = services['configurations']['druid-env']['properties']
-        for nodeType in ['broker', 'historical']:
-            properties = services['configurations'][format('druid-{nodeType}')]['properties']
-            intermediateBufferSize = int(properties['druid.processing.buffer.sizeBytes']) / (1024 * 1024)  # In MBs
-            processingThreads = int(properties['druid.processing.numThreads'])
-            directMemory = int(envProperties[format('druid.{nodeType}.jvm.direct.memory')])
-            if directMemory < (processingThreads + 1) * intermediateBufferSize:
-                validationItems.extend(
-                    {"config-name": format("druid.{nodeType}.jvm.direct.memory"), "item": self.getErrorItem(
-                        format(
-                            "Not enough direct memory available for {nodeType} Node."
-                            "Please adjust druid.{nodeType}.jvm.direct.memory, druid.processing.buffer.sizeBytes, druid.processing.numThreads"
-                        )
-                    )
-                     })
-        return self.toConfigurationValidationProblems(validationItems, "druid-env")
-
-    def validateDruidHistoricalConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-        validationItems = [
-            {"config-name": "druid.processing.numThreads",
-             "item": self.validatorEqualsToRecommendedItem(properties, recommendedDefaults,
-                                                           "druid.processing.numThreads")}
-        ]
-        return self.toConfigurationValidationProblems(validationItems, "druid-historical")
-
-    def validateDruidBrokerConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-        validationItems = [
-            {"config-name": "druid.processing.numThreads",
-             "item": self.validatorEqualsToRecommendedItem(properties, recommendedDefaults,
-                                                           "druid.processing.numThreads")}
-        ]
-        return self.toConfigurationValidationProblems(validationItems, "druid-broker")
+  pass

+ 0 - 647
ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py

@@ -1,647 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-
-from stacks.utils.RMFTestCase import *
-
-from mock.mock import MagicMock, patch
-from resource_management.libraries import functions
-from resource_management.libraries.functions import format
-from resource_management.core.logger import Logger
-
-@patch("resource_management.libraries.Script.get_tmp_dir", new=MagicMock(return_value=('/var/lib/ambari-agent/tmp')))
-@patch.object(functions, "get_stack_version", new=MagicMock(return_value="2.0.0.0-1234"))
-class TestDruid(RMFTestCase):
-  COMMON_SERVICES_PACKAGE_DIR = "DRUID/0.9.2/package"
-  STACK_VERSION = "2.6"
-  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
-
-  def setUp(self):
-    Logger.logger = MagicMock()
-    self.testDirectory = os.path.dirname(os.path.abspath(__file__))
-    self.num_times_to_iterate = 3
-    self.wait_time = 1
-
-  def test_configure_overlord(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/overlord.py",
-                       classname="DruidOverlord",
-                       command="configure",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       config_overrides = { 'role' : 'DRUID_OVERLORD' },
-                       stack_version=self.STACK_VERSION,
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assert_configure_default('druid-overlord')
-    self.assertNoMoreResources()
-
-  def test_start_overlord(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/overlord.py",
-                       classname="DruidOverlord",
-                       command="start",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       stack_version=self.STACK_VERSION,
-                       config_overrides = { 'role' : 'DRUID_OVERLORD' },
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assert_configure_default('druid-overlord')
-    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-overlord/conf/druid-env.sh ; /usr/hdp/current/druid-overlord/bin/node.sh overlord start'),
-                              user='druid'
-                              )
-    self.assertNoMoreResources()
-
-  def test_stop_overlord(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/overlord.py",
-                       classname="DruidOverlord",
-                       command="stop",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       stack_version=self.STACK_VERSION,
-                       config_overrides = { 'role' : 'DRUID_OVERLORD' },
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-overlord/conf/druid-env.sh ; /usr/hdp/current/druid-overlord/bin/node.sh overlord stop'),
-                              user='druid'
-                              )
-    self.assertNoMoreResources()
-
-  def test_configure_coordinator(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/coordinator.py",
-                       classname="DruidCoordinator",
-                       command="configure",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       config_overrides = { 'role' : 'DRUID_COORDINATOR' },
-                       stack_version=self.STACK_VERSION,
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assert_configure_default('druid-coordinator')
-    self.assertNoMoreResources()
-
-  def test_start_coordinator(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/coordinator.py",
-                       classname="DruidCoordinator",
-                       command="start",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       stack_version=self.STACK_VERSION,
-                       config_overrides = { 'role' : 'DRUID_COORDINATOR' },
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assert_configure_default('druid-coordinator')
-    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-coordinator/conf/druid-env.sh ; /usr/hdp/current/druid-coordinator/bin/node.sh coordinator start'),
-                              user='druid'
-                              )
-    self.assertNoMoreResources()
-
-  def test_stop_coordinator(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/coordinator.py",
-                       classname="DruidCoordinator",
-                       command="stop",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       stack_version=self.STACK_VERSION,
-                       config_overrides = { 'role' : 'DRUID_COORDINATOR' },
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-coordinator/conf/druid-env.sh ; /usr/hdp/current/druid-coordinator/bin/node.sh coordinator stop'),
-                              user='druid'
-                              )
-    self.assertNoMoreResources()
-
-  def test_configure_broker(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/broker.py",
-                       classname="DruidBroker",
-                       command="configure",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       config_overrides = { 'role' : 'DRUID_BROKER' },
-                       stack_version=self.STACK_VERSION,
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assert_configure_default('druid-broker')
-    self.assertNoMoreResources()
-
-  def test_start_broker(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/broker.py",
-                       classname="DruidBroker",
-                       command="start",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       stack_version=self.STACK_VERSION,
-                       config_overrides = { 'role' : 'DRUID_BROKER' },
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assert_configure_default('druid-broker')
-    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-broker/conf/druid-env.sh ; /usr/hdp/current/druid-broker/bin/node.sh broker start'),
-                              user='druid'
-                              )
-    self.assertNoMoreResources()
-
-  def test_stop_broker(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/broker.py",
-                       classname="DruidBroker",
-                       command="stop",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       stack_version=self.STACK_VERSION,
-                       config_overrides = { 'role' : 'DRUID_BROKER' },
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-broker/conf/druid-env.sh ; /usr/hdp/current/druid-broker/bin/node.sh broker stop'),
-                              user='druid'
-                              )
-    self.assertNoMoreResources()
-
-  def test_configure_router(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/router.py",
-                       classname="DruidRouter",
-                       command="configure",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       config_overrides = { 'role' : 'DRUID_ROUTER' },
-                       stack_version=self.STACK_VERSION,
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assert_configure_default('druid-router')
-    self.assertNoMoreResources()
-
-  def test_start_router(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/router.py",
-                       classname="DruidRouter",
-                       command="start",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       stack_version=self.STACK_VERSION,
-                       config_overrides = { 'role' : 'DRUID_ROUTER' },
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assert_configure_default('druid-router')
-    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-router/conf/druid-env.sh ; /usr/hdp/current/druid-router/bin/node.sh router start'),
-                              user='druid'
-                              )
-    self.assertNoMoreResources()
-
-  def test_stop_router(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/router.py",
-                       classname="DruidRouter",
-                       command="stop",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       stack_version=self.STACK_VERSION,
-                       config_overrides = { 'role' : 'DRUID_ROUTER' },
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-router/conf/druid-env.sh ; /usr/hdp/current/druid-router/bin/node.sh router stop'),
-                              user='druid'
-                              )
-    self.assertNoMoreResources()
-
-  def test_configure_historical(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historical.py",
-                       classname="DruidHistorical",
-                       command="configure",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       config_overrides = { 'role' : 'DRUID_HISTORICAL' },
-                       stack_version=self.STACK_VERSION,
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assert_configure_default('druid-historical')
-    self.assertNoMoreResources()
-
-  def test_start_historical(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historical.py",
-                       classname="DruidHistorical",
-                       command="start",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       stack_version=self.STACK_VERSION,
-                       config_overrides = { 'role' : 'DRUID_HISTORICAL' },
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assert_configure_default('druid-historical')
-    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-historical/conf/druid-env.sh ; /usr/hdp/current/druid-historical/bin/node.sh historical start'),
-                              user='druid'
-                              )
-    self.assertNoMoreResources()
-
-  def test_stop_historical(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historical.py",
-                       classname="DruidHistorical",
-                       command="stop",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       stack_version=self.STACK_VERSION,
-                       config_overrides = { 'role' : 'DRUID_HISTORICAL' },
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-historical/conf/druid-env.sh ; /usr/hdp/current/druid-historical/bin/node.sh historical stop'),
-                              user='druid'
-                              )
-    self.assertNoMoreResources()
-
-  def test_configure_middleManager(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/middlemanager.py",
-                       classname="DruidMiddleManager",
-                       command="configure",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       config_overrides = { 'role' : 'DRUID_MIDDLEMANAGER' },
-                       stack_version=self.STACK_VERSION,
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assert_configure_default('druid-middlemanager')
-    self.assertNoMoreResources()
-
-  def test_start_middleManager(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/middlemanager.py",
-                       classname="DruidMiddleManager",
-                       command="start",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       stack_version=self.STACK_VERSION,
-                       config_overrides = { 'role' : 'DRUID_MIDDLEMANAGER' },
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assert_configure_default('druid-middlemanager')
-    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-middlemanager/conf/druid-env.sh ; /usr/hdp/current/druid-middlemanager/bin/node.sh middleManager start'),
-                              user='druid'
-                              )
-    self.assertNoMoreResources()
-
-  def test_stop_middleManager(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/middlemanager.py",
-                       classname="DruidMiddleManager",
-                       command="stop",
-                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
-                       stack_version=self.STACK_VERSION,
-                       config_overrides = { 'role' : 'DRUID_MIDDLEMANAGER' },
-                       target=RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-middlemanager/conf/druid-env.sh ; /usr/hdp/current/druid-middlemanager/bin/node.sh middleManager stop'),
-                              user='druid'
-                              )
-    self.assertNoMoreResources()
-
-  def assert_configure_default(self, role):
-
-    self.assertResourceCalled('Directory', '/var/log/druid',
-                              mode=0755,
-                              cd_access='a',
-                              owner='druid',
-                              group='hadoop',
-                              create_parents=True,
-                              recursive_ownership=True
-                              )
-
-    self.assertResourceCalled('Directory', '/var/run/druid',
-                              mode=0755,
-                              cd_access='a',
-                              owner='druid',
-                              group='hadoop',
-                              create_parents=True,
-                              recursive_ownership=True
-                              )
-
-    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/conf/_common'),
-                              mode=0755,
-                              cd_access='a',
-                              owner='druid',
-                              group='hadoop',
-                              create_parents=True,
-                              recursive_ownership=True
-                              )
-
-    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/conf/coordinator'),
-                              mode=0755,
-                              cd_access='a',
-                              owner='druid',
-                              group='hadoop',
-                              create_parents=True,
-                              recursive_ownership=True
-                              )
-
-    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/conf/broker'),
-                              mode=0755,
-                              cd_access='a',
-                              owner='druid',
-                              group='hadoop',
-                              create_parents=True,
-                              recursive_ownership=True
-                              )
-
-    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/conf/middleManager'),
-                              mode=0755,
-                              cd_access='a',
-                              owner='druid',
-                              group='hadoop',
-                              create_parents=True,
-                              recursive_ownership=True
-                              )
-
-    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/conf/historical'),
-                              mode=0755,
-                              cd_access='a',
-                              owner='druid',
-                              group='hadoop',
-                              create_parents=True,
-                              recursive_ownership=True
-                              )
-
-    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/conf/overlord'),
-                              mode=0755,
-                              cd_access='a',
-                              owner='druid',
-                              group='hadoop',
-                              create_parents=True,
-                              recursive_ownership=True
-                              )
-
-    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/conf/router'),
-                              mode=0755,
-                              cd_access='a',
-                              owner='druid',
-                              group='hadoop',
-                              create_parents=True,
-                              recursive_ownership=True
-                              )
-
-    self.assertResourceCalled('Directory', '/apps/druid/segmentCache',
-                              mode=0755,
-                              cd_access='a',
-                              owner='druid',
-                              group='hadoop',
-                              create_parents=True,
-                              recursive_ownership=True
-                              )
-
-    self.assertResourceCalled('File', format('/usr/hdp/current/{role}/conf/druid-env.sh'),
-                              owner = 'druid',
-                              content = InlineTemplate(self.getConfig()['configurations']['druid-env']['content'])
-                              )
-    druid_common_config = mutable_config_dict(self.getConfig()['configurations']['druid-common'])
-    druid_common_config['druid.host'] = 'c6401.ambari.apache.org'
-    druid_common_config['druid.extensions.directory'] = format('/usr/hdp/current/{role}/extensions')
-    druid_common_config['druid.extensions.hadoopDependenciesDir'] = format('/usr/hdp/current/{role}/hadoop-dependencies')
-    druid_common_config['druid.selectors.indexing.serviceName'] = 'druid/overlord'
-    druid_common_config['druid.selectors.coordinator.serviceName'] = 'druid/coordinator'
-
-    self.assertResourceCalled('PropertiesFile', 'common.runtime.properties',
-                              dir=format("/usr/hdp/current/{role}/conf/_common"),
-                              properties=druid_common_config,
-                              owner='druid',
-                              group='hadoop'
-                              )
-
-    self.assertResourceCalled('File', format('/usr/hdp/current/{role}/conf/_common/druid-log4j.xml'),
-                              mode=0644,
-                              owner = 'druid',
-                              group = 'hadoop',
-                              content = InlineTemplate(self.getConfig()['configurations']['druid-log4j']['content'])
-                              )
-
-    self.assertResourceCalled('File', '/etc/logrotate.d/druid',
-                              mode=0644,
-                              owner = 'root',
-                              group = 'root',
-                              content = InlineTemplate(self.getConfig()['configurations']['druid-logrotate']['content'])
-                              )
-
-    self.assertResourceCalled('XmlConfig', "core-site.xml",
-                              conf_dir=format('/usr/hdp/current/{role}/conf/_common'),
-                              configurations=self.getConfig()['configurations']['core-site'],
-                              configuration_attributes=self.getConfig()['configuration_attributes']['core-site'],
-                              owner='druid',
-                              group='hadoop'
-                              )
-
-    self.assertResourceCalled('XmlConfig', "yarn-site.xml",
-                              conf_dir=format('/usr/hdp/current/{role}/conf/_common'),
-                              configurations=self.getConfig()['configurations']['yarn-site'],
-                              configuration_attributes=self.getConfig()['configuration_attributes']['yarn-site'],
-                              owner='druid',
-                              group='hadoop'
-                              )
-
-    self.assertResourceCalled('XmlConfig', "hdfs-site.xml",
-                              conf_dir=format('/usr/hdp/current/{role}/conf/_common'),
-                              configurations=self.getConfig()['configurations']['hdfs-site'],
-                              configuration_attributes=self.getConfig()['configuration_attributes']['hdfs-site'],
-                              owner='druid',
-                              group='hadoop'
-                              )
-
-    self.assertResourceCalled('PropertiesFile', "runtime.properties",
-                              dir=format('/usr/hdp/current/{role}/conf/coordinator'),
-                              properties=self.getConfig()['configurations']['druid-coordinator'],
-                              owner='druid',
-                              group='hadoop'
-                              )
-
-    self.assertResourceCalled('File', format("/usr/hdp/current/{role}/conf/coordinator/jvm.config"),
-                              owner='druid',
-                              group='hadoop',
-                              content=InlineTemplate("-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
-                                                     node_heap_memory=1024,
-                                                     node_direct_memory=2048,
-                                                     node_jvm_opts='-Duser.timezone=UTC -Dfile.encoding=UTF-8',
-                                                     log4j_config_file=format('/usr/hdp/current/{role}/conf/_common/druid-log4j.xml')
-                                                     )
-                              )
-
-    self.assertResourceCalled('PropertiesFile', "runtime.properties",
-                              dir=format('/usr/hdp/current/{role}/conf/overlord'),
-                              properties=self.getConfig()['configurations']['druid-overlord'],
-                              owner='druid',
-                              group='hadoop'
-                              )
-
-    self.assertResourceCalled('File', format("/usr/hdp/current/{role}/conf/overlord/jvm.config"),
-                              owner='druid',
-                              group='hadoop',
-                              content=InlineTemplate("-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
-                                                     node_heap_memory=1024,
-                                                     node_direct_memory=2048,
-                                                     node_jvm_opts='-Duser.timezone=UTC -Dfile.encoding=UTF-8',
-                                                     log4j_config_file=format('/usr/hdp/current/{role}/conf/_common/druid-log4j.xml')
-                                                     )
-                              )
-
-    self.assertResourceCalled('PropertiesFile', "runtime.properties",
-                              dir=format('/usr/hdp/current/{role}/conf/historical'),
-                              properties=self.getConfig()['configurations']['druid-historical'],
-                              owner='druid',
-                              group='hadoop'
-                              )
-
-    self.assertResourceCalled('File', format("/usr/hdp/current/{role}/conf/historical/jvm.config"),
-                            owner='druid',
-                            group='hadoop',
-                            content=InlineTemplate("-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
-                                                   node_heap_memory=1024,
-                                                   node_direct_memory=2048,
-                                                   node_jvm_opts='-Duser.timezone=UTC -Dfile.encoding=UTF-8',
-                                                   log4j_config_file=format('/usr/hdp/current/{role}/conf/_common/druid-log4j.xml')
-                                                   )
-                            )
-
-
-    self.assertResourceCalled('PropertiesFile', "runtime.properties",
-                          dir=format('/usr/hdp/current/{role}/conf/broker'),
-                          properties=self.getConfig()['configurations']['druid-broker'],
-                          owner='druid',
-                          group='hadoop'
-                          )
-
-    self.assertResourceCalled('File', format("/usr/hdp/current/{role}/conf/broker/jvm.config"),
-                          owner='druid',
-                          group='hadoop',
-                          content=InlineTemplate("-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
-                                                 node_heap_memory=1024,
-                                                 node_direct_memory=2048,
-                                                 node_jvm_opts='-Duser.timezone=UTC -Dfile.encoding=UTF-8',
-                                                 log4j_config_file=format('/usr/hdp/current/{role}/conf/_common/druid-log4j.xml')
-                                                 )
-                          )
-
-
-    self.assertResourceCalled('PropertiesFile', "runtime.properties",
-                          dir=format('/usr/hdp/current/{role}/conf/middleManager'),
-                          properties=self.getConfig()['configurations']['druid-middlemanager'],
-                          owner='druid',
-                          group='hadoop'
-                          )
-
-    self.assertResourceCalled('File', format("/usr/hdp/current/{role}/conf/middleManager/jvm.config"),
-                          owner='druid',
-                          group='hadoop',
-                          content=InlineTemplate("-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
-                                                 node_heap_memory=1024,
-                                                 node_direct_memory=2048,
-                                                 node_jvm_opts='-Duser.timezone=UTC -Dfile.encoding=UTF-8',
-                                                 log4j_config_file=format('/usr/hdp/current/{role}/conf/_common/druid-log4j.xml')
-                                                 )
-                          )
-
-    self.assertResourceCalled('PropertiesFile', "runtime.properties",
-                              dir=format('/usr/hdp/current/{role}/conf/router'),
-                              properties=self.getConfig()['configurations']['druid-router'],
-                              owner='druid',
-                              group='hadoop'
-                              )
-
-    self.assertResourceCalled('File', format("/usr/hdp/current/{role}/conf/router/jvm.config"),
-                              owner='druid',
-                              group='hadoop',
-                              content=InlineTemplate("-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
-                                                     node_heap_memory=1024,
-                                                     node_direct_memory=2048,
-                                                     node_jvm_opts='-Duser.timezone=UTC -Dfile.encoding=UTF-8',
-                                                     log4j_config_file=format('/usr/hdp/current/{role}/conf/_common/druid-log4j.xml')
-                                                     )
-                              )
-
-    self.assertResourceCalled('HdfsResource', '/user/druid',
-                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
-                              security_enabled = False,
-                              hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
-                              keytab = UnknownConfigurationMock(),
-                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-                              hdfs_site = {u'a': u'b'},
-                              kinit_path_local = '/usr/bin/kinit',
-                              principal_name = 'missing_principal',
-                              user = 'hdfs',
-                              owner = 'druid',
-                              hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
-                              type = 'directory',
-                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-                              dfs_type = '',
-                              recursive_chown=True,
-                              recursive_chmod=True
-                              )
-
-    self.assertResourceCalled('HdfsResource', '/user/druid/data',
-                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
-                              security_enabled = False,
-                              hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
-                              keytab = UnknownConfigurationMock(),
-                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-                              hdfs_site = {u'a': u'b'},
-                              kinit_path_local = '/usr/bin/kinit',
-                              principal_name = 'missing_principal',
-                              user = 'hdfs',
-                              owner = 'druid',
-                              hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
-                              type = 'directory',
-                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-                              dfs_type = '',
-                              mode=0755
-                              )
-
-    self.assertResourceCalled('HdfsResource', '/tmp/druid-indexing',
-                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
-                              security_enabled = False,
-                              hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
-                              keytab = UnknownConfigurationMock(),
-                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-                              hdfs_site = {u'a': u'b'},
-                              kinit_path_local = '/usr/bin/kinit',
-                              principal_name = 'missing_principal',
-                              user = 'hdfs',
-                              owner = 'druid',
-                              hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
-                              type = 'directory',
-                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-                              dfs_type = '',
-                              mode=0755
-                              )
-
-    self.assertResourceCalled('HdfsResource', '/user/druid/logs',
-                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
-                              security_enabled = False,
-                              hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
-                              keytab = UnknownConfigurationMock(),
-                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-                              hdfs_site = {u'a': u'b'},
-                              kinit_path_local = '/usr/bin/kinit',
-                              principal_name = 'missing_principal',
-                              user = 'hdfs',
-                              owner = 'druid',
-                              hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
-                              type = 'directory',
-                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-                              dfs_type = '',
-                              mode=0755
-                              )
-
-    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/extensions'),
-                              mode=0755,
-                              cd_access='a',
-                              owner='druid',
-                              group='hadoop',
-                              create_parents=True,
-                              recursive_ownership=True
-                              )
-
-    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/hadoop-dependencies'),
-                              mode=0755,
-                              cd_access='a',
-                              owner='druid',
-                              group='hadoop',
-                              create_parents=True,
-                              recursive_ownership=True
-                              )
-
-    self.assertResourceCalled('Execute', format("source /usr/hdp/current/{role}/conf/druid-env.sh ; java -classpath '/usr/hdp/current/{role}/lib/*' -Ddruid.extensions.loadList=[] -Ddruid.extensions.directory=/usr/hdp/current/{role}/extensions -Ddruid.extensions.hadoopDependenciesDir=/usr/hdp/current/{role}/hadoop-dependencies io.druid.cli.Main tools pull-deps -c mysql-metadata-storage --no-default-hadoop"),
-                              user='druid'
-                              )
-
-
-def mutable_config_dict(config):
-  rv = {}
-  for key, value in config.iteritems():
-    rv[key] = value
-  return rv

+ 0 - 575
ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py

@@ -1,575 +0,0 @@
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import json
-import os
-from unittest import TestCase
-from mock.mock import patch
-
-
-class TestHDP26StackAdvisor(TestCase):
-  def setUp(self):
-    import imp
-    self.maxDiff = None
-    self.testDirectory = os.path.dirname(os.path.abspath(__file__))
-    stackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/stack_advisor.py')
-    hdp206StackAdvisorPath = os.path.join(self.testDirectory,
-                                          '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py')
-    hdp21StackAdvisorPath = os.path.join(self.testDirectory,
-                                         '../../../../../main/resources/stacks/HDP/2.1/services/stack_advisor.py')
-    hdp22StackAdvisorPath = os.path.join(self.testDirectory,
-                                         '../../../../../main/resources/stacks/HDP/2.2/services/stack_advisor.py')
-    hdp23StackAdvisorPath = os.path.join(self.testDirectory,
-                                         '../../../../../main/resources/stacks/HDP/2.3/services/stack_advisor.py')
-    hdp24StackAdvisorPath = os.path.join(self.testDirectory,
-                                         '../../../../../main/resources/stacks/HDP/2.4/services/stack_advisor.py')
-    hdp25StackAdvisorPath = os.path.join(self.testDirectory,
-                                         '../../../../../main/resources/stacks/HDP/2.5/services/stack_advisor.py')
-    hdp26StackAdvisorPath = os.path.join(self.testDirectory,
-                                         '../../../../../main/resources/stacks/HDP/2.6/services/stack_advisor.py')
-    hdp26StackAdvisorClassName = 'HDP26StackAdvisor'
-
-    with open(stackAdvisorPath, 'rb') as fp:
-      imp.load_module('stack_advisor', fp, stackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
-    with open(hdp206StackAdvisorPath, 'rb') as fp:
-      imp.load_module('stack_advisor_impl', fp, hdp206StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
-    with open(hdp21StackAdvisorPath, 'rb') as fp:
-      imp.load_module('stack_advisor_impl', fp, hdp21StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
-    with open(hdp22StackAdvisorPath, 'rb') as fp:
-      imp.load_module('stack_advisor_impl', fp, hdp22StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
-    with open(hdp23StackAdvisorPath, 'rb') as fp:
-      imp.load_module('stack_advisor_impl', fp, hdp23StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
-    with open(hdp24StackAdvisorPath, 'rb') as fp:
-      imp.load_module('stack_advisor_impl', fp, hdp24StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
-    with open(hdp25StackAdvisorPath, 'rb') as fp:
-      imp.load_module('stack_advisor_impl', fp, hdp25StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
-    with open(hdp26StackAdvisorPath, 'rb') as fp:
-      stack_advisor_impl = imp.load_module('stack_advisor_impl', fp, hdp26StackAdvisorPath,
-                                           ('.py', 'rb', imp.PY_SOURCE))
-    clazz = getattr(stack_advisor_impl, hdp26StackAdvisorClassName)
-    self.stackAdvisor = clazz()
-
-    # substitute method in the instance
-    self.get_system_min_uid_real = self.stackAdvisor.get_system_min_uid
-    self.stackAdvisor.get_system_min_uid = self.get_system_min_uid_magic
-
-  @patch('__builtin__.open')
-  @patch('os.path.exists')
-  def get_system_min_uid_magic(self, exists_mock, open_mock):
-    class MagicFile(object):
-      def read(self):
-        return """
-          #test line UID_MIN 200
-          UID_MIN 500
-          """
-
-      def __exit__(self, exc_type, exc_val, exc_tb):
-        pass
-
-      def __enter__(self):
-        return self
-
-    exists_mock.return_value = True
-    open_mock.return_value = MagicFile()
-    return self.get_system_min_uid_real()
-
-  def test_recommendDruidConfigurations_withMysql(self):
-    hosts = {
-      "items": [
-        {
-          "Hosts": {
-            "cpu_count": 4,
-            "total_mem": 50331648,
-            "disk_info": [
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"},
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"}
-            ],
-            "public_host_name": "c6401.ambari.apache.org",
-            "host_name": "c6401.ambari.apache.org"
-          },
-        }
-      ]
-    }
-
-    services = {
-      "Versions": {
-        "parent_stack_version": "2.5",
-        "stack_name": "HDP",
-        "stack_version": "2.6",
-        "stack_hierarchy": {
-          "stack_name": "HDP",
-          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
-        }
-      },
-      "services": [{
-        "StackServices": {
-          "service_name": "DRUID",
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_COORDINATOR",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_OVERLORD",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_BROKER",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_HISTORICAL",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_MIDDLEMANAGER",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          }
-        ]
-      }
-      ],
-      "configurations": {
-        "druid-common": {
-          "properties": {
-            "database_name": "druid",
-            "metastore_hostname": "c6401.ambari.apache.org",
-            "druid.metadata.storage.type": "mysql",
-            "druid.extensions.loadList": "[\"postgresql-metadata-storage\"]",
-            "druid.extensions.pullList": "[]"
-          }
-        }
-      }
-    }
-
-    clusterData = {
-      "cpu": 4,
-      "mapMemory": 30000,
-      "amMemory": 20000,
-      "reduceMemory": 20560,
-      "containers": 30,
-      "ramPerContainer": 512,
-      "referenceNodeManagerHost": {
-        "total_mem": 10240 * 1024
-      }
-    }
-
-    configurations = {
-    }
-
-    self.stackAdvisor.recommendDruidConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations,
-                      {'druid-historical': {
-                        'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
-                        'druid-broker': {
-                          'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
-                        'druid-common': {'properties': {'druid.extensions.loadList': '["mysql-metadata-storage"]',
-                                                        'druid.metadata.storage.connector.port': '3306',
-                                                        'druid.metadata.storage.connector.connectURI': 'jdbc:mysql://c6401.ambari.apache.org:3306/druid?createDatabaseIfNotExist=true',
-                                                        'druid.zk.service.host': '',
-                                                        'druid.extensions.pullList': '["io.druid.extensions:mysql-metadata-storage"]'}},
-                        'druid-env': {'properties': {},
-                                      'property_attributes': {'druid.coordinator.jvm.heap.memory': {'maximum': '49152'},
-                                                              'druid.overlord.jvm.heap.memory': {'maximum': '49152'},
-                                                              'druid.middlemanager.jvm.heap.memory': {
-                                                                'maximum': '49152'},
-                                                              'druid.historical.jvm.heap.memory': {'maximum': '49152'},
-                                                              'druid.broker.jvm.heap.memory': {'maximum': '49152'}}}}
-                      )
-
-  def test_recommendDruidConfigurations_WithPostgresql(self):
-    hosts = {
-      "items": [
-        {
-          "Hosts": {
-            "cpu_count": 4,
-            "total_mem": 50331648,
-            "disk_info": [
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"},
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"}
-            ],
-            "public_host_name": "c6401.ambari.apache.org",
-            "host_name": "c6401.ambari.apache.org"
-          }
-        }
-      ]
-    }
-
-    services = {
-      "Versions": {
-        "parent_stack_version": "2.5",
-        "stack_name": "HDP",
-        "stack_version": "2.6",
-        "stack_hierarchy": {
-          "stack_name": "HDP",
-          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
-        }
-      },
-      "services": [{
-        "StackServices": {
-          "service_name": "DRUID",
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_COORDINATOR",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_OVERLORD",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_BROKER",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_HISTORICAL",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_MIDDLEMANAGER",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          }
-        ]
-      }
-      ],
-      "configurations": {
-        "druid-common": {
-          "properties": {
-            "database_name": "druid",
-            "metastore_hostname": "c6401.ambari.apache.org",
-            "druid.metadata.storage.type": "postgres",
-            "druid.extensions.loadList": "[\"mysql-metadata-storage\"]",
-            "druid.extensions.pullList": "[]"
-          }
-        }
-      }
-    }
-
-    clusterData = {
-    }
-
-    configurations = {
-    }
-
-    self.stackAdvisor.recommendDruidConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations,
-                      {'druid-historical': {
-                        'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
-                        'druid-broker': {
-                          'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
-                        'druid-common': {'properties': {'druid.extensions.loadList': '["postgresql-metadata-storage"]',
-                                                        'druid.metadata.storage.connector.port': '5432',
-                                                        'druid.metadata.storage.connector.connectURI': 'jdbc:postgresql://c6401.ambari.apache.org:5432/druid',
-                                                        'druid.zk.service.host': '',
-                                                        'druid.extensions.pullList': '[]'}},
-                        'druid-env': {'properties': {},
-                                      'property_attributes': {'druid.coordinator.jvm.heap.memory': {'maximum': '49152'},
-                                                              'druid.overlord.jvm.heap.memory': {'maximum': '49152'},
-                                                              'druid.middlemanager.jvm.heap.memory': {
-                                                                'maximum': '49152'},
-                                                              'druid.historical.jvm.heap.memory': {'maximum': '49152'},
-                                                              'druid.broker.jvm.heap.memory': {'maximum': '49152'}}}}
-                      )
-
-  def test_recommendDruidConfigurations_WithDerby(self):
-    hosts = {
-      "items": [
-        {
-          "Hosts": {
-            "cpu_count": 4,
-            "total_mem": 50331648,
-            "disk_info": [
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"},
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"}
-            ],
-            "public_host_name": "c6401.ambari.apache.org",
-            "host_name": "c6401.ambari.apache.org"
-          }
-        }
-      ]
-    }
-
-    services = {
-      "Versions": {
-        "parent_stack_version": "2.5",
-        "stack_name": "HDP",
-        "stack_version": "2.6",
-        "stack_hierarchy": {
-          "stack_name": "HDP",
-          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
-        }
-      },
-      "services": [{
-        "StackServices": {
-          "service_name": "DRUID",
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_COORDINATOR",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_OVERLORD",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_BROKER",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_HISTORICAL",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_MIDDLEMANAGER",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          }
-        ]
-      }
-      ],
-      "configurations": {
-        "druid-common": {
-          "properties": {
-            "database_name": "druid",
-            "metastore_hostname": "c6401.ambari.apache.org",
-            "druid.metadata.storage.type": "derby",
-            "druid.extensions.loadList": "[\"mysql-metadata-storage\"]",
-            "druid.extensions.pullList": "[]"
-          }
-        }
-      }
-    }
-
-    clusterData = {
-    }
-
-    configurations = {
-    }
-
-    self.stackAdvisor.recommendDruidConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations,
-                      {'druid-historical': {
-                        'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
-                        'druid-broker': {
-                          'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
-                        'druid-common': {'properties': {'druid.extensions.loadList': '[]',
-                                                        'druid.metadata.storage.connector.port': '1527',
-                                                        'druid.metadata.storage.connector.connectURI': 'jdbc:derby://c6401.ambari.apache.org:1527/druid;create=true',
-                                                        'druid.zk.service.host': '',
-                                                        'druid.extensions.pullList': '[]'}},
-                        'druid-env': {'properties': {},
-                                      'property_attributes': {'druid.coordinator.jvm.heap.memory': {'maximum': '49152'},
-                                                              'druid.overlord.jvm.heap.memory': {'maximum': '49152'},
-                                                              'druid.middlemanager.jvm.heap.memory': {
-                                                                'maximum': '49152'},
-                                                              'druid.historical.jvm.heap.memory': {'maximum': '49152'},
-                                                              'druid.broker.jvm.heap.memory': {'maximum': '49152'}}}}
-                      )
-
-  def test_recommendDruidConfigurations_heterogeneous_hosts(self):
-    hosts = {
-      "items": [
-        {
-          "href": "/api/v1/hosts/c6401.ambari.apache.org",
-          "Hosts": {
-            "cpu_count": 4,
-            "total_mem": 50331648,
-            "disk_info": [
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"},
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"}
-            ],
-            "public_host_name": "c6401.ambari.apache.org",
-            "host_name": "c6401.ambari.apache.org"
-          }
-        }, {
-          "href": "/api/v1/hosts/c6402.ambari.apache.org",
-          "Hosts": {
-            "cpu_count": 1,
-            "total_mem": 1922680,
-            "disk_info": [
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"},
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"}
-            ],
-            "public_host_name": "c6402.ambari.apache.org",
-            "host_name": "c6402.ambari.apache.org"
-          }
-        },
-        {
-          "href": "/api/v1/hosts/c6403.ambari.apache.org",
-          "Hosts": {
-            "cpu_count": 3,
-            "total_mem": 3845360,
-            "disk_info": [
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"},
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"}
-            ],
-            "public_host_name": "c6403.ambari.apache.org",
-            "host_name": "c6403.ambari.apache.org"
-          }
-        }
-      ]
-    }
-
-    services = {
-      "Versions": {
-        "parent_stack_version": "2.5",
-        "stack_name": "HDP",
-        "stack_version": "2.6",
-        "stack_hierarchy": {
-          "stack_name": "HDP",
-          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
-        }
-      },
-      "services": [{
-        "StackServices": {
-          "service_name": "DRUID",
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_COORDINATOR",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_OVERLORD",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_BROKER",
-              "hostnames": ["c6402.ambari.apache.org", "c6403.ambari.apache.org"]
-            },
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_HISTORICAL",
-              "hostnames": ["c6401.ambari.apache.org", "c6403.ambari.apache.org"]
-            },
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DRUID_MIDDLEMANAGER",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-          }
-        ]
-      }
-      ],
-      "configurations": {
-        "druid-common": {
-          "properties": {
-            "database_name": "druid",
-            "metastore_hostname": "c6401.ambari.apache.org",
-            "druid.metadata.storage.type": "derby",
-            "druid.extensions.loadList": "[\"mysql-metadata-storage\"]",
-            "druid.extensions.pullList": "[]"
-          }
-        }
-      }
-    }
-
-    clusterData = {
-    }
-
-    configurations = {
-    }
-
-    self.stackAdvisor.recommendDruidConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations,
-                      {'druid-historical': {
-                        'properties': {'druid.processing.numThreads': '2', 'druid.server.http.numThreads': '40'}},
-                        'druid-broker': {
-                          'properties': {'druid.processing.numThreads': '1', 'druid.server.http.numThreads': '40'}},
-                        'druid-common': {'properties': {'druid.extensions.loadList': '[]',
-                                                        'druid.metadata.storage.connector.port': '1527',
-                                                        'druid.metadata.storage.connector.connectURI': 'jdbc:derby://c6401.ambari.apache.org:1527/druid;create=true',
-                                                        'druid.zk.service.host': '',
-                                                        'druid.extensions.pullList': '[]'}},
-                        'druid-env': {'properties': {},
-                                      'property_attributes': {'druid.coordinator.jvm.heap.memory': {'maximum': '49152'},
-                                                              'druid.overlord.jvm.heap.memory': {'maximum': '49152'},
-                                                              'druid.middlemanager.jvm.heap.memory': {
-                                                                'maximum': '49152'},
-                                                              'druid.historical.jvm.heap.memory': {'maximum': '3755'},
-                                                              'druid.broker.jvm.heap.memory': {'maximum': '1877'}}}}
-                      )
-
-
-def load_json(self, filename):
-  file = os.path.join(self.testDirectory, filename)
-  with open(file, 'rb') as f:
-    data = json.load(f)
-  return data

File diff suppressed because it is too large
+ 0 - 100
ambari-server/src/test/python/stacks/2.6/configs/default.json


Some files were not shown because too many files changed in this diff