
Revert "[RTC 136620]: Introduce BigInsights stacks on Ambari 2.4 branch"

This reverts commit 66984d9a06f3f3072712ad43fbdb63e26af503b7.
Mahadev Konar, 9 years ago
parent commit 44e21f8ef2
100 changed files with 0 additions and 8018 deletions
  1. +0 -4  ambari-server/pom.xml
  2. +0 -208  ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/configuration/solr-env.xml
  3. +0 -82  ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/configuration/solr-log4j.xml
  4. +0 -44  ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/configuration/solr-site.xml
  5. +0 -47  ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/kerberos.json
  6. +0 -75  ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/metainfo.xml
  7. +0 -19  ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/__init__.py
  8. +0 -130  ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/params.py
  9. +0 -91  ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/service_check.py
  10. +0 -142  ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/solr.py
  11. +0 -36  ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/solr_client.py
  12. +0 -110  ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/solr_server.py
  13. +0 -59  ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/solr_service.py
  14. +0 -135  ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/solr_upgrade.py
  15. +0 -32  ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/status_params.py
  16. +0 -51  ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/templates/solr.xml.j2
  17. +0 -234  ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/configuration/solr-env.xml
  18. +0 -82  ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/configuration/solr-log4j.xml
  19. +0 -44  ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/configuration/solr-site.xml
  20. +0 -53  ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/kerberos.json
  21. +0 -82  ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/metainfo.xml
  22. +0 -19  ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/__init__.py
  23. +0 -156  ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/params.py
  24. +0 -91  ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/service_check.py
  25. +0 -95  ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/solr.py
  26. +0 -36  ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/solr_client.py
  27. +0 -107  ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/solr_server.py
  28. +0 -71  ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/solr_service.py
  29. +0 -135  ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/solr_upgrade.py
  30. +0 -32  ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/status_params.py
  31. +0 -51  ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/templates/solr.xml.j2
  32. +0 -182  ambari-server/src/main/resources/stacks/BigInsights/4.0/blueprints/multinode-default.json
  33. +0 -133  ambari-server/src/main/resources/stacks/BigInsights/4.0/blueprints/singlenode-default.json
  34. +0 -268  ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
  35. +0 -38  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/hook.py
  36. +0 -88  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/params.py
  37. +0 -89  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/shared_initialization.py
  38. +0 -63  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/files/changeToSecureUid.sh
  39. +0 -36  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/hook.py
  40. +0 -226  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/params.py
  41. +0 -242  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/shared_initialization.py
  42. +0 -37  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/hook.py
  43. +0 -111  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/params.py
  44. +0 -91  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/repo_initialization.py
  45. +0 -34  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/shared_initialization.py
  46. +0 -29  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-RESTART/scripts/hook.py
  47. +0 -65  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/checkForFormat.sh
  48. BIN  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/fast-hdfs-resource.jar
  49. +0 -134  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/task-log4j.properties
  50. +0 -66  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/topology_script.py
  51. +0 -40  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/hook.py
  52. +0 -211  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/params.py
  53. +0 -71  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/rack_awareness.py
  54. +0 -152  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/shared_initialization.py
  55. +0 -43  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/commons-logging.properties.j2
  56. +0 -21  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/exclude_hosts_list.j2
  57. +0 -88  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
  58. +0 -81  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/health_check.j2
  59. +0 -21  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/include_hosts_list.j2
  60. +0 -24  ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/topology_mappings.data.j2
  61. +0 -68  ambari-server/src/main/resources/stacks/BigInsights/4.0/kerberos.json
  62. +0 -22  ambari-server/src/main/resources/stacks/BigInsights/4.0/metainfo.xml
  63. +0 -265  ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
  64. +0 -4  ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
  65. +0 -35  ambari-server/src/main/resources/stacks/BigInsights/4.0/repos/repoinfo.xml
  66. +0 -70  ambari-server/src/main/resources/stacks/BigInsights/4.0/role_command_order.json
  67. +0 -27  ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/metainfo.xml
  68. +0 -27  ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/metainfo.xml
  69. +0 -26  ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/metainfo.xml
  70. +0 -26  ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/metainfo.xml
  71. +0 -26  ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/metainfo.xml
  72. +0 -26  ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/metainfo.xml
  73. +0 -27  ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/metainfo.xml
  74. +0 -27  ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/metainfo.xml
  75. +0 -28  ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/metainfo.xml
  76. +0 -27  ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/metainfo.xml
  77. +0 -30  ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/metainfo.xml
  78. +0 -26  ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/metainfo.xml
  79. +0 -24  ambari-server/src/main/resources/stacks/BigInsights/4.0/services/stack_advisor.py
  80. +0 -1  ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_206.py
  81. +0 -1  ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_21.py
  82. +0 -1  ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_22.py
  83. +0 -1  ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_23.py
  84. +0 -110  ambari-server/src/main/resources/stacks/BigInsights/4.0/upgrades/config-upgrade.xml
  85. +0 -717  ambari-server/src/main/resources/stacks/BigInsights/4.0/upgrades/nonrolling-upgrade-4.1.xml
  86. +0 -569  ambari-server/src/main/resources/stacks/BigInsights/4.0/upgrades/upgrade-4.1.xml
  87. +0 -95  ambari-server/src/main/resources/stacks/BigInsights/4.0/widgets.json
  88. +0 -47  ambari-server/src/main/resources/stacks/BigInsights/4.1/kerberos.json
  89. +0 -23  ambari-server/src/main/resources/stacks/BigInsights/4.1/metainfo.xml
  90. +0 -44  ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml
  91. +0 -32  ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_RH6
  92. +0 -32  ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_RH7
  93. +0 -32  ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_SLES
  94. +0 -32  ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.ppc64le_RH7
  95. +0 -32  ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.s390x_RH7
  96. +0 -22  ambari-server/src/main/resources/stacks/BigInsights/4.1/role_command_order.json
  97. +0 -28  ambari-server/src/main/resources/stacks/BigInsights/4.1/services/AMBARI_METRICS/metainfo.xml
  98. +0 -70  ambari-server/src/main/resources/stacks/BigInsights/4.1/services/FLUME/configuration/flume-env.xml
  99. +0 -36  ambari-server/src/main/resources/stacks/BigInsights/4.1/services/FLUME/metainfo.xml
  100. +0 -45  ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HBASE/metainfo.xml

+ 0 - 4
ambari-server/pom.xml

@@ -303,10 +303,6 @@
             <!-- Stack definitions -->
             <exclude>src/main/resources/stacks/HDP/2.0._/services/HBASE/package/templates/regionservers.j2</exclude>
             <exclude>src/main/resources/stacks/HDPWIN/2.1/services/*/configuration*/*</exclude>
-            <exclude>src/main/resources/stacks/BigInsights/4.0/stack-advisor/**</exclude>
-            <exclude>src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/smoketest_metrics.json.j2</exclude>
-            <exclude>src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/*.txt</exclude>
-   
 
             <!--test samples -->
             <exclude>src/test/resources/TestAmbaryServer.samples/**</exclude>

+ 0 - 208
ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/configuration/solr-env.xml

@@ -1,208 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-   <property>
-    <name>solr_user</name>
-    <description>User to run Solr as</description>
-    <property-type>USER</property-type>
-    <value>solr</value>
-  </property>
-  
-  <property>
-    <name>solr_lib_dir</name>
-    <value>/var/lib/solr</value>
-    <description>Directory for writable Solr files and index data</description>
-  </property>
-
-  <property>
-    <name>solr_pid_dir</name>
-    <value>/var/run/solr</value>
-  </property>
-
-  <property>
-    <name>solr_log_dir</name>
-    <value>/var/log/solr</value>
-  </property>
-
-  <property>
-    <name>solr_port</name>
-    <value>8983</value>
-    <description>Sets the port Solr binds to, default is 8983</description>
-  </property>
-
-  <property>
-    <name>solr_hdfs_home_dir</name>
-    <value>/apps/solr/data</value>
-    <description>A root location in HDFS for Solr to write collection data to. Rather than specifying an HDFS location for the data directory or update log directory, use this to specify one root location and have everything automatically created within this HDFS</description>
-  </property>
-
-  
-  <property>
-    <name>ZOOKEEPER_CHROOT</name>
-    <value>/solr</value>
-    <description>If you're using a ZooKeeper instance that is shared by other systems, it's recommended to isolate the SolrCloud znode tree using ZooKeeper's chroot support. 
-    For instance, to ensure all znodes created by SolrCloud are stored under /solr, you can put /solr on the end of your ZK_HOST connection string, such as: ZK_HOST=zk1,zk2,zk3/solr</description>
-  </property>
-  
- <property>
-    <name>content</name>
-    <description>This is the jinja template for solr.in.sh file</description>
-    <value>
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-export JAVA_HOME={{java64_home}}
-
-# By default the script will use JAVA_HOME to determine which java
-# to use, but you can set a specific path for Solr to use without
-# affecting other Java applications on your server/workstation.
-#SOLR_JAVA_HOME=""
-
-
-
-# Increase Java Min/Max Heap as needed to support your indexing / query needs
-SOLR_JAVA_MEM="-Xms512m -Xmx512m"
-
-
-# Enable verbose GC logging
-GC_LOG_OPTS="-verbose:gc -XX:+PrintHeapAtGC -XX:+PrintGCDetails \
--XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+PrintTenuringDistribution -XX:+PrintGCApplicationStoppedTime"
-
-
-# These GC settings have shown to work well for a number of common Solr workloads
-GC_TUNE="-XX:NewRatio=3 \
--XX:SurvivorRatio=4 \
--XX:TargetSurvivorRatio=90 \
--XX:MaxTenuringThreshold=8 \
--XX:+UseConcMarkSweepGC \
--XX:+UseParNewGC \
--XX:ConcGCThreads=4 -XX:ParallelGCThreads=4 \
--XX:+CMSScavengeBeforeRemark \
--XX:PretenureSizeThreshold=64m \
--XX:+UseCMSInitiatingOccupancyOnly \
--XX:CMSInitiatingOccupancyFraction=50 \
--XX:CMSMaxAbortablePrecleanTime=6000 \
--XX:+CMSParallelRemarkEnabled \
--XX:+ParallelRefProcEnabled \
--XX:MaxDirectMemorySize=20g"
-
-
-# Set the ZooKeeper connection string if using an external ZooKeeper ensemble
-# e.g. host1:2181,host2:2181/chroot
-# Leave empty if not using SolrCloud
-#ZK_HOST=""
-
-
-
-# Set the ZooKeeper client timeout (for SolrCloud mode)
-#ZK_CLIENT_TIMEOUT="15000"
-
-
-# By default the start script uses "localhost"; override the hostname here
-# for production SolrCloud environments to control the hostname exposed to cluster state
-#SOLR_HOST="192.168.1.1"
-
-# By default the start script uses UTC; override the timezone if needed
-#SOLR_TIMEZONE="UTC"
-
-# Set to true to activate the JMX RMI connector to allow remote JMX client applications
-# to monitor the JVM hosting Solr; set to "false" to disable that behavior
-# (false is recommended in production environments)
-ENABLE_REMOTE_JMX_OPTS="false"
-
-
-# The script will use SOLR_PORT+10000 for the RMI_PORT or you can set it here
-# RMI_PORT=18983
-
-
-# Anything you add to the SOLR_OPTS variable will be included in the java
-# start command line as-is, in ADDITION to other options. If you specify the
-# -a option on start script, those options will be appended as well. Examples:
-#SOLR_OPTS="$SOLR_OPTS -Dsolr.autoSoftCommit.maxTime=3000"
-#SOLR_OPTS="$SOLR_OPTS -Dsolr.autoCommit.maxTime=60000"
-#SOLR_OPTS="$SOLR_OPTS -Dsolr.clustering.enabled=true"
-
-
-
-# Location where the bin/solr script will save PID files for running instances
-# If not set, the script will create PID files in $SOLR_TIP/bin
-#SOLR_PID_DIR=
-
-
-
-# Path to a directory where Solr creates index files, the specified directory
-# must contain a solr.xml; by default, Solr will use server/solr
-#SOLR_HOME=
-
-
-
-# Solr provides a default Log4J configuration properties file in server/resources
-# however, you may want to customize the log settings and file appender location
-# so you can point the script to use a different log4j.properties file
-#LOG4J_PROPS=/var/solr/log4j.properties
-
-
-
-# Location where Solr should write logs to; should agree with the file appender
-# settings in server/resources/log4j.properties
-#SOLR_LOGS_DIR=
-
-
-
-# Sets the port Solr binds to, default is 8983
-#SOLR_PORT=8983
-
-
-
-# Uncomment to set SSL-related system properties
-# Be sure to update the paths to the correct keystore for your environment
-#SOLR_SSL_OPTS="-Djavax.net.ssl.keyStore=etc/solr-ssl.keystore.jks \
-#-Djavax.net.ssl.keyStorePassword=secret \
-#-Djavax.net.ssl.trustStore=etc/solr-ssl.keystore.jks \
-#-Djavax.net.ssl.trustStorePassword=secret"
-
-
-
-# Uncomment to set a specific SSL port (-Djetty.ssl.port=N); if not set
-# and you are using SSL, then the start script will use SOLR_PORT for the SSL port
-#SOLR_SSL_PORT=
-
-
-SOLR_PID_DIR={{pid_dir}}
-SOLR_HOME={{lib_dir}}/data
-LOG4J_PROPS={{solr_conf_dir}}/log4j.properties
-SOLR_LOGS_DIR={{log_dir}}
-SOLR_PORT={{solr_port}}
-SOLR_MODE=solrcloud
-ZK_HOST={{zookeeper_hosts_list}}{{zookeeper_chroot}}
-SOLR_HOST={{hostname}}
-
-# Comment out the following SOLR_OPTS setting to config Solr to write its index and transaction log files to local filesystem. 
-# Data (index and transaction log files) exists on HDFS will not be moved to local filesystem, 
-# after you change this config, they will not be available from local filesystem.
-SOLR_OPTS="-Dsolr.directoryFactory=HdfsDirectoryFactory \
--Dsolr.lock.type=hdfs \
--Dsolr.hdfs.confdir=/etc/hadoop/conf \
--Dsolr.hdfs.home={{fs_root}}{{solr_hdfs_home_dir}} \
--Dsolr.hdfs.security.kerberos.enabled={{sole_kerberos_enabled}} \
--Dsolr.hdfs.security.kerberos.keytabfile={{solr_keytab}} \
--Dsolr.hdfs.security.kerberos.principal={{solr_principal}}"
-
-    </value>
-  </property>  
-  
-</configuration>
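
The solr.in.sh body in the "content" property above is a Jinja-style template: the agent substitutes the {{...}} placeholders from solr-env values before writing the file into the Solr conf directory. A minimal sketch of that substitution using plain Jinja2 (the stack itself uses InlineTemplate), with made-up values rather than the stack defaults:

    from jinja2 import Template

    # Small excerpt of the template above; the real content is the full solr.in.sh.
    solr_in_sh = Template(
        "SOLR_PORT={{solr_port}}\n"
        "SOLR_PID_DIR={{pid_dir}}\n"
        "ZK_HOST={{zookeeper_hosts_list}}{{zookeeper_chroot}}\n"
    )

    # Hypothetical values standing in for the rendered solr-env parameters.
    print(solr_in_sh.render(
        solr_port=8983,
        pid_dir="/var/run/solr",
        zookeeper_hosts_list="zk1:2181,zk2:2181,zk3:2181",
        zookeeper_chroot="/solr",
    ))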

+ 0 - 82
ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/configuration/solr-log4j.xml

@@ -1,82 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-#
-# Solr Logging Configuration
-#
-
-#  Logging level
-solr.log=${solr.solr.home}/../logs
-log4j.rootLogger=INFO, file, CONSOLE
-
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%-4r [%t] %-5p %c %x [%X{collection} %X{shard} %X{replica} %X{core}] \u2013 %m%n
-
-#- size rotation with log cleanup.
-log4j.appender.file=org.apache.log4j.RollingFileAppender
-log4j.appender.file.MaxFileSize=4MB
-log4j.appender.file.MaxBackupIndex=9
-
-#- File to log to and log format
-log4j.appender.file.File=${solr.log}/solr.log
-log4j.appender.file.layout=org.apache.log4j.PatternLayout
-log4j.appender.file.layout.ConversionPattern=%-5p - %d{yyyy-MM-dd HH:mm:ss.SSS}; [%X{collection} %X{shard} %X{replica} %X{core}] %C; %m\n
-
-log4j.logger.org.apache.zookeeper=WARN
-log4j.logger.org.apache.hadoop=WARN
-
-# set to INFO to enable infostream log messages
-log4j.logger.org.apache.solr.update.LoggingInfoStream=OFF
-    </value>
-  </property>
-
-</configuration>

+ 0 - 44
ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/configuration/solr-site.xml

@@ -1,44 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true">
-  <property>
-    <name>solr.hdfs.security.kerberos.enabled</name>
-    <value>false</value>
-    <description>Set to true to enable Kerberos authentication</description>
-  </property>
-
-  <property>
-    <name>solr.hdfs.security.kerberos.keytabfile</name>
-    <value>/etc/security/keytabs/solr.service.keytab</value>
-    <description>A keytab file contains pairs of Kerberos principals and encrypted keys which allows for password-less authentication when Solr attempts to authenticate with secure Hadoop.
-    This file will need to be present on all Solr servers at the same path provided in this parameter.
-    </description>
-  </property>
-
-  <property>
-    <name>solr.hdfs.security.kerberos.principal</name>
-    <value>solr/_HOST@EXAMPLE.COM</value>
-    <description>The Kerberos principal that Solr should use to authenticate to secure Hadoop; the format of a typical Kerberos V5 principal is: primary/instance@realm
-    </description>
-  </property>
-
-</configuration>

+ 0 - 47
ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/kerberos.json

@@ -1,47 +0,0 @@
-{
-  "services": [
-    {
-      "name": "SOLR",
-      "identities": [
-        {
-          "name": "/smokeuser"
-        },
-      ],
-      "configurations": [
-        {
-          "solr-site": {
-              "solr.hdfs.security.kerberos.enabled":"true"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "SOLR",
-          "identities": [
-            {
-              "name": "solr",
-              "principal": {
-                "value": "${solr-env/solr_user}/_HOST@${realm}",
-                "type": "service",
-                "configuration": "solr-site/solr.hdfs.security.kerberos.principal",
-                "local_username": "${solr-env/solr_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/solr.service.keytab",
-                "owner": {
-                  "name": "${solr-env/solr_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "solr-site/solr.hdfs.security.kerberos.keytabfile"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}
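
The principal and keytab values in the descriptor above use Ambari's ${config-type/property} placeholders (for example ${solr-env/solr_user} and ${realm}), which the server resolves against cluster configuration when Kerberos is enabled. A rough, self-contained sketch of that lookup, with hypothetical configuration values:

    import re

    # Hypothetical configuration snapshot; real values come from the cluster.
    configs = {
        "solr-env": {"solr_user": "solr"},
        "cluster-env": {"user_group": "hadoop"},
    }

    def resolve(expr, configs, realm="EXAMPLE.COM"):
        # Expand ${realm} first, then any ${config-type/property} reference.
        expr = expr.replace("${realm}", realm)
        return re.sub(r"\$\{([^}/]+)/([^}]+)\}",
                      lambda m: configs[m.group(1)][m.group(2)],
                      expr)

    print(resolve("${solr-env/solr_user}/_HOST@${realm}", configs))  # solr/_HOST@EXAMPLE.COM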

+ 0 - 75
ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/metainfo.xml

@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>SOLR</name>
-      <displayName>Solr</displayName>
-      <comment>Solr is the popular, blazing fast open source enterprise search platform from the Apache Lucene project
-      </comment>
-      <version>5.1.0.4.1</version>
-
-      <components>
-        <component>
-          <name>SOLR</name>
-          <displayName>Solr</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/solr_server.py</script>
-            <scriptType>PYTHON</scriptType>
-	    <timeout>600</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>solr-site.xml</fileName>
-              <dictionaryName>solr-site</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>solr_4_1_*</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>		
-        <service>HDFS</service>
-      </requiredServices>
-      
-      <configuration-dependencies>
-        <config-type>solr-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>
-

+ 0 - 19
ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/__init__.py

@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

+ 0 - 130
ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/params.py

@@ -1,130 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from resource_management.libraries import Script
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.resources import HdfsResource
-import status_params
-
-# server configurations
-config = Script.get_config()
-stack_name = default("/hostLevelParams/stack_name", None)
-
-zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
-zookeeper_hosts.sort()
-zookeeper_hosts_list=','.join(zookeeper_hosts)
-
-java64_home = config['hostLevelParams']['java_home']
-
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
-# Version being upgraded/downgraded to
-# It cannot be used during the initial Cluser Install because the version is not yet known.
-version = default("/commandParams/version", None)
-
-# current host stack version
-current_version = default("/hostLevelParams/current_version", None)
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-iop_stack_version = format_hdp_stack_version(stack_version_unformatted)
-
-# Upgrade direction
-upgrade_direction = default("/commandParams/upgrade_direction", None)
-
-solr_user=config['configurations']['solr-env']['solr_user']
-user_group=config['configurations']['cluster-env']['user_group']
-hostname = config['hostname']
-solr_server_hosts = config['clusterHostInfo']['solr_hosts'] 
-solr_server_host = solr_server_hosts[0]
-
-fs_root = config['configurations']['core-site']['fs.defaultFS']
-
-solr_home = '/usr/iop/current/solr-server'
-solr_conf_dir='/usr/iop/current/solr-server/conf'
-if compare_versions(format_hdp_stack_version(current_version), '4.2.0.0') >= 0:
-  if upgrade_direction is not None and upgrade_direction == Direction.DOWNGRADE and version is not None and compare_versions(format_hdp_stack_version(version), '4.2.0.0') < 0:
-    lib_dir=default("/configurations/solr-env/solr_lib_dir", None)
-  else:
-    lib_dir=default("/configurations/solr-env/solr_data_dir", None)
-    solr_data_dir=default("/configurations/solr-env/solr_data_dir", None)
-else: #IOP 4.1
-  if upgrade_direction is not None and upgrade_direction == Direction.UPGRADE:
-    solr_data_dir=default("/configurations/solr-env/solr_data_dir", None)
-    lib_dir=default("/configurations/solr-env/solr_data_dir", None)
-    old_lib_dir=default("/configurations/solr-env/solr_lib_dir", None)
-  else:
-    solr_data_dir=default("/configurations/solr-env/solr_lib_dir", None)
-    lib_dir=default("/configurations/solr-env/solr_lib_dir", None)
-log_dir=config['configurations']['solr-env']['solr_log_dir']
-pid_dir=config['configurations']['solr-env']['solr_pid_dir']
-solr_port=config['configurations']['solr-env']['solr_port']
-
-zookeeper_chroot=config['configurations']['solr-env']['ZOOKEEPER_CHROOT']
-
-solr_site = dict(config['configurations']['solr-site'])
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-solr_principal = solr_site['solr.hdfs.security.kerberos.principal']
-
-if security_enabled:
-  solr_principal = solr_principal.replace('_HOST',hostname)
-  solr_site['solr.hdfs.security.kerberos.principal']=solr_principal
-
-#kerberos
-sole_kerberos_enabled=config['configurations']['solr-site']['solr.hdfs.security.kerberos.enabled']
-solr_keytab=config['configurations']['solr-site']['solr.hdfs.security.kerberos.keytabfile']
-
-#log4j.properties
-log4j_props = config['configurations']['solr-log4j']['content']
-
-solr_in_sh_template = config['configurations']['solr-env']['content']
-
-solr_pid_file = status_params.solr_pid_file
-
-solr_hdfs_home_dir = config['configurations']['solr-env']['solr_hdfs_home_dir']
-solr_hdfs_user_mode = 0775
-
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_bin_dir = conf_select.get_hadoop_dir("bin")
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-kinit_path_local = get_kinit_path()
-import functools
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs
-)
-
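
params.py ends by currying HdfsResource with functools.partial, so later call sites only pass the path and the action while the Kerberos and filesystem arguments are bound once. A self-contained sketch of the same pattern, using a stand-in function rather than the real resource class:

    import functools

    # Stand-in for resource_management's HdfsResource, just to show the currying.
    def hdfs_resource(path, action=None, user=None, default_fs=None, **kwargs):
        print("HdfsResource(%r, action=%r, user=%r, fs=%r)" % (path, action, user, default_fs))

    # Bind the arguments that are identical for every call, as params.py does.
    HdfsResource = functools.partial(
        hdfs_resource,
        user="hdfs",
        default_fs="hdfs://namenode.example.com:8020",  # hypothetical value
    )

    # Call sites then only supply what varies per invocation.
    HdfsResource("/apps/solr/data", action="create_on_execute")
    HdfsResource(None, action="execute")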

+ 0 - 91
ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/service_check.py

@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-
-from resource_management import *
-from resource_management.libraries.functions.validate import call_and_match_output
-import subprocess
-import time
-
-class SolrServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    command = "curl"
-    httpGssnegotiate = "--negotiate"
-    userpswd = "-u:"
-    insecure = "-k"
-    silent = "-s"
-    out = "-o /dev/null"
-    head = "-w'%{http_code}'"
-    url = "http://" + params.solr_server_host + ":" + str(params.solr_port) + "/solr/"
-    url_server_check = url + '#/'
-
-    command_with_flags = [command, silent, out, head, httpGssnegotiate, userpswd, insecure, url_server_check]
-
-    is_running = False
-    for i in range(1,11):
-      proc = subprocess.Popen(command_with_flags, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-      Logger.info("Try %d, command: %s" % (i, " ".join(command_with_flags)))
-      (stdout, stderr) = proc.communicate()
-      response = stdout
-      if '200' in response:
-        is_running = True
-        Logger.info('Solr Server up and running')
-        break
-      Logger.info("Response: %s" % str(response))
-      time.sleep(5)
-
-    if is_running == False :
-      Logger.info('Solr Server not running.')
-      raise ComponentIsNotRunning()
-
-    if params.security_enabled:
-        kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
-        Execute(kinit_cmd,
-                user = params.smokeuser,
-                logoutput = True
-        )
-
-    create_collection_cmd = format("SOLR_INCLUDE={solr_conf_dir}/solr.in.sh solr create -c smokeuser-ExampleCollection -s 2 -d data_driven_schema_configs")
-    create_collection_output = "success"
-    create_collection_exists_output = "Collection 'smokeuser-ExampleCollection' already exists!"
-
-    Logger.info("Creating solr collection from example: %s" % create_collection_cmd)
-    call_and_match_output(create_collection_cmd, format("({create_collection_output})|({create_collection_exists_output})"), "Failed to create collection")
-
-    list_collection_cmd = "curl " + url + "admin/collections?action=list"
-    list_collection_output = "<str>smokeuser-ExampleCollection</str>"
-    Logger.info("List Collections: %s" % list_collection_cmd)
-    call_and_match_output(list_collection_cmd, format("({list_collection_output})"), "Failed to create collection \"smokeuser-ExampleCollection\" or check that collection exists")
-
-    delete_collection_cmd = format("SOLR_INCLUDE={solr_conf_dir}/solr.in.sh solr delete -c smokeuser-ExampleCollection")
-
-    Logger.info("Deleting solr collection : %s" % delete_collection_cmd)
-
-    Execute(delete_collection_cmd,
-      user = params.solr_user,
-      logoutput=True
-    )
-
-
-if __name__ == "__main__":
-  SolrServiceCheck().execute()
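
The check above shells out to curl up to ten times and waits for an HTTP 200 from the Solr UI before exercising collection create/list/delete. A dependency-free Python 3 sketch of the same polling loop, against a hypothetical host and port:

    import time
    import urllib.request
    from urllib.error import URLError

    def wait_for_solr(url, attempts=10, delay=5):
        # Retry until the endpoint answers 200, mirroring the loop above.
        for attempt in range(1, attempts + 1):
            try:
                if urllib.request.urlopen(url, timeout=10).status == 200:
                    return True
            except URLError as err:
                print("Try %d failed: %s" % (attempt, err))
            time.sleep(delay)
        return False

    # Hypothetical endpoint; the real check builds it from solr_server_host and solr_port.
    if not wait_for_solr("http://solr-host.example.com:8983/solr/"):
        raise RuntimeError("Solr Server not running.")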

+ 0 - 142
ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/solr.py

@@ -1,142 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions import conf_select
-import sys
-import os
-
-def solr(type = None, upgrade_type=None):
-  import params
-
-  if type == 'server':
-    effective_version = params.iop_stack_version if upgrade_type is None else format_hdp_stack_version(params.version)
-
-    params.HdfsResource(params.solr_hdfs_home_dir,
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.solr_user,
-                         mode=params.solr_hdfs_user_mode
-    )
-    params.HdfsResource(None, action="execute")
-
-    Directory([params.log_dir,params.pid_dir,params.solr_conf_dir],
-              mode=0755,
-              cd_access='a',
-              owner=params.solr_user,
-              recursive=True,
-              group=params.user_group
-      )
-
-    XmlConfig("solr-site.xml",
-              conf_dir=params.solr_conf_dir,
-              configurations=params.solr_site,
-              configuration_attributes=params.config['configuration_attributes']['solr-site'],
-              owner=params.solr_user,
-              group=params.user_group,
-              mode=0644
-    )
-
-    File(format("{solr_conf_dir}/solr.in.sh"),
-         content=InlineTemplate(params.solr_in_sh_template),
-         owner=params.solr_user,
-         group=params.user_group
-    )
-
-    File(format("{solr_conf_dir}/log4j.properties"),
-           mode=0644,
-           group=params.user_group,
-           owner=params.solr_user,
-           content=params.log4j_props
-    )
-
-    File(format("{solr_conf_dir}/log4j.properties"),
-           mode=0644,
-           group=params.user_group,
-           owner=params.solr_user,
-           content=params.log4j_props
-    )
-
-    Directory(params.lib_dir,
-              mode=0755,
-              cd_access='a',
-              owner=params.solr_user,
-              recursive=True,
-              group=params.user_group
-    )
-
-    if effective_version is not None and effective_version != "" and compare_versions(effective_version, '4.2.0.0') >= 0:
-      File(format("{lib_dir}/solr.xml"),
-              mode=0644,
-              group=params.user_group,
-              owner=params.solr_user,
-              content=Template("solr.xml.j2")
-      )
-    else:
-      Directory(format("{lib_dir}/data"),
-              owner=params.solr_user,
-              recursive=True,
-              group=params.user_group
-      )
-
-      File(format("{lib_dir}/data/solr.xml"),
-              mode=0644,
-              group=params.user_group,
-              owner=params.solr_user,
-              content=Template("solr.xml.j2")
-      )
-
-    #solr-webapp is temp dir, need to own by solr in order for it to wirte temp files into.
-    Directory(format("{solr_home}/server/solr-webapp"),
-              owner=params.solr_user,
-              recursive=True,
-    )
-
-  elif type == '4103':
-    solr41_conf_dir = "/usr/iop/4.1.0.0/solr/conf"
-    solr41_etc_dir="/etc/solr/4.1.0.0/0"
-    if not os.path.exists(solr41_etc_dir):
-      Execute("mkdir -p /etc/solr/4.1.0.0/0")
-
-    content_path=solr41_conf_dir
-    if not os.path.isfile("/usr/iop/4.1.0.0/solr/conf/solr.in.sh"):
-      content_path = "/etc/solr/conf.backup"
-
-    for each in os.listdir(content_path):
-      File(os.path.join(solr41_etc_dir, each),
-           owner=params.solr_user,
-           content = StaticFile(os.path.join(content_path,each)))
-
-    if not os.path.islink(solr41_conf_dir):
-      Directory(solr41_conf_dir,
-                action="delete",
-                recursive=True)
-
-    if os.path.islink(solr41_conf_dir):
-      os.unlink(solr41_conf_dir)
-
-    if not os.path.islink(solr41_conf_dir):
-      Link(solr41_conf_dir,
-           to=solr41_etc_dir
-      )
-
-    conf_select.select(params.stack_name, "solr", "4.1.0.0")
-
-

+ 0 - 36
ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/solr_client.py

@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-class SolrClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    
-
-  def configure(self, env):
-    print 'Configure the solr client';
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  SolrClient().execute()

+ 0 - 110
ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/solr_server.py

@@ -1,110 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import iop_select
-from solr_service import solr_service
-from solr import solr
-import os
-
-class SolrServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-
-  def configure(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    if not os.path.isfile("/usr/iop/4.1.0.0/solr/conf/solr.in.sh"):
-      solr(type='4103', upgrade_type=upgrade_type)
-    solr(type='server', upgrade_type=upgrade_type)
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '4.1.0.0') >= 0:
-      iop_select.select("solr-server", params.version)
-      conf_select.select(params.stack_name, "solr", params.version)
-    
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    solr_service(action = 'start')
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    solr_service(action = 'stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.solr_pid_file)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"solr.hdfs.security.kerberos.enabled":"true"}
-      props_empty_check = ["solr.hdfs.security.kerberos.keytabfile",
-                           "solr.hdfs.security.kerberos.principal"]
-      props_read_check = ["solr.hdfs.security.kerberos.keytabfile"]
-      solr_site_props = build_expectations('solr-site', props_value_check, props_empty_check, props_read_check)
-
-      solr_expectations = {}
-      solr_expectations.update(solr_site_props)
-
-      security_params = get_params_from_filesystem(status_params.solr_conf_dir,
-                                                   {'solr-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params,solr_expectations)
-
-      if not result_issues: # If all validations passed successfully
-        try:
-          if 'solr-site' not in security_params \
-            or 'solr.hdfs.security.kerberos.keytabfile' not in security_params['solr-site'] \
-            or 'solr.hdfs.security.kerberos.principal' not in security_params['solr-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.solr_user,
-                                security_params['solr-site']['solr.hdfs.security.kerberos.keytabfile'],
-                                security_params['solr-site']['solr.hdfs.security.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-
-if __name__ == "__main__":
-  SolrServer().execute()

+ 0 - 59
ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/solr_service.py

@@ -1,59 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def solr_service(action='start'):
-  import params
-  cmd = format("{solr_home}/bin/solr")
-
-  if action == 'start':
-
-    if params.security_enabled:
-      if params.solr_principal is None:
-        solr_principal_with_host = 'missing_principal'
-      else:
-        solr_principal_with_host = params.solr_principal.replace("_HOST", params.hostname)
-      kinit_cmd = format("{kinit_path_local} -kt {solr_keytab} {solr_principal_with_host};")
-      Execute(kinit_cmd,user=params.solr_user)
-
-    Execute (params.solr_home+'/server/scripts/cloud-scripts/zkcli.sh -zkhost ' + params.zookeeper_hosts_list + ' -cmd makepath ' + params.zookeeper_chroot, user=params.solr_user, ignore_failures=True )
-
-    if (params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE) or (compare_versions(format_hdp_stack_version(params.current_version), '4.2.0.0') >= 0):
-      solr_home_dir = params.solr_data_dir
-    else:
-      solr_home_dir = params.lib_dir + "/data"
-
-    daemon_cmd = format("SOLR_INCLUDE={solr_conf_dir}/solr.in.sh {cmd} start -c -s {solr_home_dir} -V")
-    no_op_test = format("ls {solr_pid_file} >/dev/null 2>&1 && ps `cat {solr_pid_file}` >/dev/null 2>&1")
-    Execute(daemon_cmd,
-            not_if=no_op_test,
-            user=params.solr_user
-    )
-  elif action == 'stop':
-    daemon_cmd = format("SOLR_INCLUDE={solr_conf_dir}/solr.in.sh {cmd} stop")
-    no_op_test = format("! ((`SOLR_INCLUDE={solr_conf_dir}/solr.in.sh {cmd} status |grep process |wc -l`))")
-    rm_pid = format("rm -f {solr_pid_file}")
-    Execute(daemon_cmd,
-            not_if=no_op_test,
-            user=params.solr_user
-    )
-    Execute(rm_pid)
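
Both the start and stop branches above are guarded by a no_op_test, so re-running the command is harmless; the start guard simply checks that the pid file names a live process. A small Python sketch of that guard, with a hypothetical pid-file path:

    import os

    def solr_already_running(pid_file="/var/run/solr/solr-8983.pid"):
        # Equivalent of: ls {solr_pid_file} && ps `cat {solr_pid_file}`
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
        except (IOError, ValueError):
            return False
        try:
            os.kill(pid, 0)          # signal 0 only checks that the process exists
        except ProcessLookupError:   # stale pid file
            return False
        except PermissionError:      # process exists but is owned by another user
            return True
        return True

    if not solr_already_running():
        print("would run the solr start command here")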

+ 0 - 135
ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/solr_upgrade.py

@@ -1,135 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-from resource_management import *
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import get_unique_id_and_date
-
-class SolrServerUpgrade(Script):
-  def pre_upgrade_conf41(self, env):
-    """
-    Create /etc/solr/4.1.0.0/0 directory and copies Solr config files here.
-    Create symlinks accordingly.
-
-    conf-select create-conf-dir --package solr --stack-version 4.1.0.0 --conf-version 0
-    cp -r /usr/iop/4.1.0.0/solr/conf/* /etc/solr/4.1.0.0/0/.
-    unlink or rm -r /usr/iop/4.1.0.0/solr/conf
-    ln -s /etc/solr/4.1.0.0/0 /usr/iop/4.1.0.0/solr/conf
-    conf-select set-conf-dir --package solr --stack-version 4.1.0.0 --conf-version 0
-    """
-    import params
-    env.set_params(params)
-
-    solr41_conf_dir="/usr/iop/4.1.0.0/solr/conf"
-    solr41_etc_dir="/etc/solr/4.1.0.0/0"
-    if not os.path.exists(solr41_etc_dir):
-      conf_select.create(params.stack_name, "solr", "4.1.0.0")
-
-    content_path=solr41_conf_dir
-    if not os.path.isfile("/usr/iop/4.1.0.0/solr/conf/solr.in.sh"):
-      content_path = "/etc/solr/conf.backup"
-
-    for each in os.listdir(content_path):
-      File(os.path.join(solr41_etc_dir, each),
-           owner=params.solr_user,
-           content = StaticFile(os.path.join(content_path, each)))
-
-    if not os.path.islink(solr41_conf_dir):
-      Directory(solr41_conf_dir,
-                action="delete",
-                recursive=True)
-
-    if os.path.islink(solr41_conf_dir):
-      os.unlink(solr41_conf_dir)
-
-    if not os.path.islink(solr41_conf_dir):
-      Link(solr41_conf_dir,
-           to=solr41_etc_dir
-      )
-
-    conf_select.select(params.stack_name, "solr", "4.1.0.0")
-
-  def pre_stop_backup_cores(self, env):
-    """
-    Backs up the Solr cores under Solr's home directory.
-    cp -r /var/lib/solr/data/* /tmp/solr/cores
-    """
-    import params
-    env.set_params(params)
-
-    if compare_versions(format_hdp_stack_version(params.current_version), '4.2.0.0') >= 0:
-      solr_home_dir=params.solr_data_dir
-    else: #4.1.0.0
-      solr_home_dir=params.old_lib_dir + "/data"
-
-    unique = get_unique_id_and_date()
-    backup_solr_dir="/tmp/upgrades/{0}/solr_{1}".format(params.current_version, unique)
-    backup_solr_cores="/tmp/solr/cores"
-
-    if os.path.isdir(solr_home_dir) and not os.path.isdir(backup_solr_dir):
-      os.makedirs(backup_solr_dir)
-      Execute(('cp', '-r', solr_home_dir+"/.", backup_solr_dir),
-              sudo=True
-      )
-
-    if params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE:
-      Directory(backup_solr_cores,
-                action="delete",
-                recursive=True)
-
-      Directory(backup_solr_cores,
-                mode=0755,
-                cd_access='a',
-                owner=params.solr_user,
-                recursive=True,
-                group=params.user_group
-      )
-
-      Execute(('cp', '-r', solr_home_dir+"/.", backup_solr_cores),
-              user=params.solr_user
-      )
-
-  def pre_start_migrate_cores(self, env):
-    """
-    Copy the Solr cores from previous version to the new Solr home directory if solr_home is a differnet directory.
-    cp -r /tmp/solr/cores/* /opt/solr/data/.
-    """
-    import params
-    env.set_params(params)
-
-    if params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE:
-      backup_solr_cores="/tmp/solr/cores"
-      solr_home_dir=params.solr_data_dir
-
-      Directory(format(solr_home_dir),
-                owner=params.solr_user,
-                recursive=True,
-                group=params.user_group
-      )
-
-      if os.path.isdir(solr_home_dir) and os.path.isdir(backup_solr_cores):
-        Execute(('cp', '-rn', backup_solr_cores+"/.", solr_home_dir),
-                 user=params.solr_user,
-                 logoutput=True
-        )
-
-if __name__ == "__main__":
-  SolrServerUpgrade().execute()

+ 0 - 32
ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/scripts/status_params.py

@@ -1,32 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-
-config = Script.get_config()
-
-solr_user = config['configurations']['solr-env']['solr_user']
-hostname = config['hostname']
-kinit_path_local = functions.get_kinit_path()
-tmp_dir = Script.get_tmp_dir()
-solr_pid_dir = config['configurations']['solr-env']['solr_pid_dir']
-solr_port = config['configurations']['solr-env']['solr_port']
-solr_pid_file = format("{solr_pid_dir}/solr-{solr_port}.pid")

+ 0 - 51
ambari-server/src/main/resources/common-services/SOLR/5.1.0.4.1/package/templates/solr.xml.j2

@@ -1,51 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!--
-   This is an example of a simple "solr.xml" file for configuring one or 
-   more Solr Cores, as well as allowing Cores to be added, removed, and 
-   reloaded via HTTP requests.
-
-   More information about options available in this configuration file, 
-   and Solr Core administration can be found online:
-   http://wiki.apache.org/solr/CoreAdmin
--->
-
-<solr>
-
-  <solrcloud>
-
-    <str name="host">${host:}</str>
-    <int name="hostPort">${jetty.port:8983}</int>
-    <str name="hostContext">${hostContext:solr}</str>
-
-    <bool name="genericCoreNodeNames">${genericCoreNodeNames:true}</bool>
-
-    <int name="zkClientTimeout">${zkClientTimeout:30000}</int>
-    <int name="distribUpdateSoTimeout">${distribUpdateSoTimeout:600000}</int>
-    <int name="distribUpdateConnTimeout">${distribUpdateConnTimeout:60000}</int>
-
-  </solrcloud>
-
-  <shardHandlerFactory name="shardHandlerFactory"
-    class="HttpShardHandlerFactory">
-    <int name="socketTimeout">${socketTimeout:600000}</int>
-    <int name="connTimeout">${connTimeout:60000}</int>
-  </shardHandlerFactory>
-
-</solr>

+ 0 - 234
ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/configuration/solr-env.xml

@@ -1,234 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-   <property>
-    <name>solr_user</name>
-    <description>User to run Solr as</description>
-    <property-type>USER</property-type>
-    <value>solr</value>
-  </property>
-  
-  <property>
-    <name>solr_data_dir</name>
-    <value>/opt/solr/data</value>
-    <description>Solr Home Directory for writable Solr files and index data</description>
-  </property>
-
-  <property>
-    <name>solr_pid_dir</name>
-    <value>/var/run/solr</value>
-  </property>
-
-  <property>
-    <name>solr_log_dir</name>
-    <value>/var/log/solr</value>
-  </property>
-
-  <property>
-    <name>solr_port</name>
-    <value>8983</value>
-    <description>Sets the port Solr binds to, default is 8983</description>
-  </property>
-
-  <property>
-    <name>solr_hdfs_home_dir</name>
-    <value>/apps/solr/data</value>
-    <description>A root location in HDFS for Solr to write collection data to. Rather than specifying an HDFS location for the data directory or update log directory, use this to specify one root location and have everything automatically created within this HDFS location.</description>
-  </property>
-
-  
-  <property>
-    <name>ZOOKEEPER_CHROOT</name>
-    <value>/solr</value>
-    <description>If you're using a ZooKeeper instance that is shared by other systems, it's recommended to isolate the SolrCloud znode tree using ZooKeeper's chroot support. 
-    For instance, to ensure all znodes created by SolrCloud are stored under /solr, you can put /solr on the end of your ZK_HOST connection string, such as: ZK_HOST=zk1,zk2,zk3/solr</description>
-  </property>
-
-  <property>
-      <name>solr_xms_minmem</name>
-      <value>512</value>
-      <description>Set Xms value for Solr in MB</description>
-      <value-attributes>
-          <type>int</type>
-          <minimum>0</minimum>
-          <maximum>268435456</maximum>
-          <unit>MB</unit>
-          <increment-step>1</increment-step>
-      </value-attributes>
-  </property>
-
-  <property>
-      <name>solr_xmx_maxmem</name>
-      <value>512</value>
-      <description>Set Xmx value for Solr in MB</description>
-      <value-attributes>
-          <type>int</type>
-          <minimum>{{solr_xms_minmem}}</minimum>
-          <maximum>268435456</maximum>
-          <unit>MB</unit>
-          <increment-step>1</increment-step>
-      </value-attributes>
-  </property>
-
- <property>
-    <name>content</name>
-    <description>This is the Jinja template for the solr.in.sh file</description>
-    <value>
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-export JAVA_HOME={{java64_home}}
-
-# By default the script will use JAVA_HOME to determine which java
-# to use, but you can set a specific path for Solr to use without
-# affecting other Java applications on your server/workstation.
-#SOLR_JAVA_HOME=""
-
-
-
-# Increase Java Min/Max Heap as needed to support your indexing / query needs
-SOLR_JAVA_MEM="-Xms{{solr_xms_minmem}}m -Xmx{{solr_xmx_maxmem}}m"
-
-# Enable verbose GC logging
-GC_LOG_OPTS="-verbose:gc -XX:+PrintHeapAtGC -XX:+PrintGCDetails \
--XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+PrintTenuringDistribution -XX:+PrintGCApplicationStoppedTime"
-
-
-# These GC settings have shown to work well for a number of common Solr workloads
-GC_TUNE="-XX:NewRatio=3 \
--XX:SurvivorRatio=4 \
--XX:TargetSurvivorRatio=90 \
--XX:MaxTenuringThreshold=8 \
--XX:+UseConcMarkSweepGC \
--XX:+UseParNewGC \
--XX:ConcGCThreads=4 -XX:ParallelGCThreads=4 \
--XX:+CMSScavengeBeforeRemark \
--XX:PretenureSizeThreshold=64m \
--XX:+UseCMSInitiatingOccupancyOnly \
--XX:CMSInitiatingOccupancyFraction=50 \
--XX:CMSMaxAbortablePrecleanTime=6000 \
--XX:+CMSParallelRemarkEnabled \
--XX:+ParallelRefProcEnabled \
--XX:MaxDirectMemorySize=20g"
-
-
-# Set the ZooKeeper connection string if using an external ZooKeeper ensemble
-# e.g. host1:2181,host2:2181/chroot
-# Leave empty if not using SolrCloud
-#ZK_HOST=""
-
-
-
-# Set the ZooKeeper client timeout (for SolrCloud mode)
-#ZK_CLIENT_TIMEOUT="15000"
-
-
-# By default the start script uses "localhost"; override the hostname here
-# for production SolrCloud environments to control the hostname exposed to cluster state
-#SOLR_HOST="192.168.1.1"
-
-# By default the start script uses UTC; override the timezone if needed
-#SOLR_TIMEZONE="UTC"
-
-# Set to true to activate the JMX RMI connector to allow remote JMX client applications
-# to monitor the JVM hosting Solr; set to "false" to disable that behavior
-# (false is recommended in production environments)
-ENABLE_REMOTE_JMX_OPTS="false"
-
-
-# The script will use SOLR_PORT+10000 for the RMI_PORT or you can set it here
-# RMI_PORT=18983
-
-
-# Anything you add to the SOLR_OPTS variable will be included in the java
-# start command line as-is, in ADDITION to other options. If you specify the
-# -a option on start script, those options will be appended as well. Examples:
-#SOLR_OPTS="$SOLR_OPTS -Dsolr.autoSoftCommit.maxTime=3000"
-#SOLR_OPTS="$SOLR_OPTS -Dsolr.autoCommit.maxTime=60000"
-#SOLR_OPTS="$SOLR_OPTS -Dsolr.clustering.enabled=true"
-
-
-
-# Location where the bin/solr script will save PID files for running instances
-# If not set, the script will create PID files in $SOLR_TIP/bin
-#SOLR_PID_DIR=
-
-
-
-# Path to a directory where Solr creates index files, the specified directory
-# must contain a solr.xml; by default, Solr will use server/solr
-#SOLR_HOME=
-
-
-
-# Solr provides a default Log4J configuration properties file in server/resources
-# however, you may want to customize the log settings and file appender location
-# so you can point the script to use a different log4j.properties file
-#LOG4J_PROPS=/var/solr/log4j.properties
-
-
-
-# Location where Solr should write logs to; should agree with the file appender
-# settings in server/resources/log4j.properties
-#SOLR_LOGS_DIR=
-
-
-
-# Sets the port Solr binds to, default is 8983
-#SOLR_PORT=8983
-
-
-
-# Uncomment to set SSL-related system properties
-# Be sure to update the paths to the correct keystore for your environment
-#SOLR_SSL_OPTS="-Djavax.net.ssl.keyStore=etc/solr-ssl.keystore.jks \
-#-Djavax.net.ssl.keyStorePassword=secret \
-#-Djavax.net.ssl.trustStore=etc/solr-ssl.keystore.jks \
-#-Djavax.net.ssl.trustStorePassword=secret"
-
-
-
-# Uncomment to set a specific SSL port (-Djetty.ssl.port=N); if not set
-# and you are using SSL, then the start script will use SOLR_PORT for the SSL port
-#SOLR_SSL_PORT=
-
-
-SOLR_PID_DIR={{pid_dir}}
-SOLR_HOME={{solr_data_dir}}
-LOG4J_PROPS={{solr_conf_dir}}/log4j.properties
-SOLR_LOGS_DIR={{log_dir}}
-SOLR_PORT={{solr_port}}
-SOLR_MODE=solrcloud
-ZK_HOST={{zookeeper_hosts_list}}{{zookeeper_chroot}}
-SOLR_HOST={{hostname}}
-
-# Comment out the following SOLR_OPTS setting to configure Solr to write its index and transaction log files to the local filesystem.
-# Data (index and transaction log files) that already exists on HDFS will not be moved to the local filesystem;
-# after you change this config, it will not be available from the local filesystem.
-SOLR_OPTS="-Dsolr.directoryFactory=HdfsDirectoryFactory \
--Dsolr.lock.type=hdfs \
--Dsolr.hdfs.confdir=/etc/hadoop/conf \
--Dsolr.hdfs.home={{fs_root}}{{solr_hdfs_home_dir}} \
--Dsolr.hdfs.security.kerberos.enabled={{sole_kerberos_enabled}} \
--Dsolr.hdfs.security.kerberos.keytabfile={{solr_keytab}} \
--Dsolr.hdfs.security.kerberos.principal={{solr_principal}} \
--Dsolr.log4j.dir={{log_dir}}"
-
-    </value>
-  </property>  
-  
-</configuration>

+ 0 - 82
ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/configuration/solr-log4j.xml

@@ -1,82 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-#
-# Solr Logging Configuration
-#
-
-#  Logging level
-solr.log=${solr.log4j.dir}
-log4j.rootLogger=INFO, file, CONSOLE
-
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%-4r [%t] %-5p %c %x [%X{collection} %X{shard} %X{replica} %X{core}] \u2013 %m%n
-
-#- size rotation with log cleanup.
-log4j.appender.file=org.apache.log4j.RollingFileAppender
-log4j.appender.file.MaxFileSize=4MB
-log4j.appender.file.MaxBackupIndex=9
-
-#- File to log to and log format
-log4j.appender.file.File=${solr.log}/solr.log
-log4j.appender.file.layout=org.apache.log4j.PatternLayout
-log4j.appender.file.layout.ConversionPattern=%-5p - %d{yyyy-MM-dd HH:mm:ss.SSS}; [%X{collection} %X{shard} %X{replica} %X{core}] %C; %m\n
-
-log4j.logger.org.apache.zookeeper=WARN
-log4j.logger.org.apache.hadoop=WARN
-
-# set to INFO to enable infostream log messages
-log4j.logger.org.apache.solr.update.LoggingInfoStream=OFF
-    </value>
-  </property>
-
-</configuration>

+ 0 - 44
ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/configuration/solr-site.xml

@@ -1,44 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true">
-  <property>
-    <name>solr.hdfs.security.kerberos.enabled</name>
-    <value>false</value>
-    <description>Set to true to enable Kerberos authentication</description>
-  </property>
-
-  <property>
-    <name>solr.hdfs.security.kerberos.keytabfile</name>
-    <value>/etc/security/keytabs/solr.service.keytab</value>
-    <description>A keytab file contains pairs of Kerberos principals and encrypted keys which allows for password-less authentication when Solr attempts to authenticate with secure Hadoop.
-    This file will need to be present on all Solr servers at the same path provided in this parameter.
-    </description>
-  </property>
-
-  <property>
-    <name>solr.hdfs.security.kerberos.principal</name>
-    <value>solr/_HOST@EXAMPLE.COM</value>
-    <description>The Kerberos principal that Solr should use to authenticate to secure Hadoop; the format of a typical Kerberos V5 principal is: primary/instance@realm
-    </description>
-  </property>
-
-</configuration>

+ 0 - 53
ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/kerberos.json

@@ -1,53 +0,0 @@
-{
-  "services": [
-    {
-      "name": "SOLR",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        },
-        {
-          "name": "/hdfs"
-        }
-      ],
-      "configurations": [
-        {
-          "solr-site": {
-              "solr.hdfs.security.kerberos.enabled":"true"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "SOLR",
-          "identities": [
-            {
-              "name": "solr",
-              "principal": {
-                "value": "${solr-env/solr_user}/_HOST@${realm}",
-                "type": "service",
-                "configuration": "solr-site/solr.hdfs.security.kerberos.principal",
-                "local_username": "${solr-env/solr_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/solr.service.keytab",
-                "owner": {
-                  "name": "${solr-env/solr_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "solr-site/solr.hdfs.security.kerberos.keytabfile"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}

+ 0 - 82
ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/metainfo.xml

@@ -1,82 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>SOLR</name>
-      <displayName>Solr</displayName>
-      <comment>Solr is the popular, blazing fast open source enterprise search platform from the Apache Lucene project
-      </comment>
-      <version>5.5.0.4.2</version>
-
-      <components>
-        <component>
-          <name>SOLR</name>
-          <displayName>Solr</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/solr_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>solr-site.xml</fileName>
-              <dictionaryName>solr-site</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>solr_4_2_*</name>
-            </package>
-            <package>
-              <name>titan_4_2_*</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>		
-        <service>HDFS</service>
-      </requiredServices>
-      
-      <configuration-dependencies>
-        <config-type>solr-log4j</config-type>
-        <config-type>solr-env</config-type>
-        <config-type>solr-site</config-type>
-        <config-type>titan-hbase-solr</config-type>
-        <restartRequiredAfterChange>true</restartRequiredAfterChange>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>
-

+ 0 - 19
ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/__init__.py

@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

+ 0 - 156
ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/params.py

@@ -1,156 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.resources import HdfsResource
-import status_params
-
-# server configurations
-config = Script.get_config()
-stack_name = default("/hostLevelParams/stack_name", None)
-
-zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
-zookeeper_hosts.sort()
-zookeeper_hosts_list=','.join(zookeeper_hosts)
-
-java64_home = config['hostLevelParams']['java_home']
-
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
-# Version being upgraded/downgraded to
-# It cannot be used during the initial Cluster Install because the version is not yet known.
-version = default("/commandParams/version", None)
-
-# current host stack version
-current_version = default("/hostLevelParams/current_version", None)
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-iop_stack_version = format_hdp_stack_version(stack_version_unformatted)
-
-# Upgrade direction
-upgrade_direction = default("/commandParams/upgrade_direction", None)
-
-solr_user=config['configurations']['solr-env']['solr_user']
-user_group=config['configurations']['cluster-env']['user_group']
-hostname = config['hostname']
-solr_server_hosts = config['clusterHostInfo']['solr_hosts'] 
-solr_server_host = solr_server_hosts[0]
-
-fs_root = config['configurations']['core-site']['fs.defaultFS']
-
-solr_home = '/usr/iop/current/solr-server'
-solr_conf_dir='/usr/iop/current/solr-server/conf'
-cloud_scripts=solr_home+'/server/scripts/cloud-scripts'
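-# pick the Solr home/data dir based on the stack version (IOP 4.2+ uses solr_data_dir, IOP 4.1 uses solr_lib_dir) and the rolling upgrade/downgrade direction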
-if (current_version is not None and compare_versions(format_hdp_stack_version(current_version), '4.2.0.0') >=0 ) or  compare_versions(iop_stack_version, '4.2.0.0')>= 0:
-  if upgrade_direction is not None and upgrade_direction == Direction.DOWNGRADE and version is not None and compare_versions(format_hdp_stack_version(version), '4.2.0.0') < 0:
-    solr_data_dir=default("/configurations/solr-env/solr_lib_dir", None)
-  else:
-    solr_data_dir=default("/configurations/solr-env/solr_data_dir", None)
-else: #IOP 4.1
-  if upgrade_direction is not None and upgrade_direction == Direction.UPGRADE:
-    solr_data_dir=default("/configurations/solr-env/solr_data_dir", None)
-    lib_dir=default("/configurations/solr-env/solr_data_dir", None)
-    old_lib_dir=default("/configurations/solr-env/solr_lib_dir", None)
-  else:
-    solr_data_dir=default("/configurations/solr-env/solr_lib_dir", None)
-    lib_dir=default("/configurations/solr-env/solr_lib_dir", None)
-log_dir=config['configurations']['solr-env']['solr_log_dir']
-pid_dir=config['configurations']['solr-env']['solr_pid_dir']
-solr_port=config['configurations']['solr-env']['solr_port']
-
-zookeeper_chroot=config['configurations']['solr-env']['ZOOKEEPER_CHROOT']
-
-solr_xms_minmem = config['configurations']['solr-env']['solr_xms_minmem']
-solr_xmx_maxmem = config['configurations']['solr-env']['solr_xmx_maxmem']
-
-solr_site = dict(config['configurations']['solr-site'])
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-solr_principal = solr_site['solr.hdfs.security.kerberos.principal']
-
-if security_enabled:
-  solr_principal = solr_principal.replace('_HOST',hostname)
-  solr_site['solr.hdfs.security.kerberos.principal']=solr_principal
-
-#kerberos
-sole_kerberos_enabled=config['configurations']['solr-site']['solr.hdfs.security.kerberos.enabled']
-solr_keytab=config['configurations']['solr-site']['solr.hdfs.security.kerberos.keytabfile']
-
-#log4j.properties
-log4j_props = config['configurations']['solr-log4j']['content']
-
-solr_in_sh_template = config['configurations']['solr-env']['content']
-
-solr_pid_file = status_params.solr_pid_file
-
-solr_hdfs_home_dir = config['configurations']['solr-env']['solr_hdfs_home_dir']
-solr_hdfs_user_mode = 0775
-
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_bin_dir = conf_select.get_hadoop_dir("bin")
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-kinit_path_local = get_kinit_path()
-
-# parameters for integration with Titan
-configuration_tags = config['configurationTags']
-
-# Integrate with Titan
-# parse the value for property 'index.search.solr.configset' in titan-hbase-solr
-titan_solr_configset = 'titan'
-if ('titan-hbase-solr' in configuration_tags):
-    titan_hbase_solr_props = config['configurations']['titan-hbase-solr']['content']
-    prop_list = titan_hbase_solr_props.split('\n')
-    for prop in prop_list:
-      if (prop.find('index.search.solr.configset') > -1):
-         titan_solr_configset_prop = prop.split('=')
-         titan_solr_configset = titan_solr_configset_prop[1]
-
-titan_solr_conf_dir = format('/usr/iop/current/titan-client/conf/solr')
-solr_conf_trg_dir = format('/usr/iop/current/solr-server/server/solr/configsets')
-solr_solr_conf_dir = format('/usr/iop/current/solr-server/server/solr/configsets/solr')
-solr_titan_conf_dir = format('/usr/iop/current/solr-server/server/solr/configsets/{titan_solr_configset}')
-titan_solr_jar_file = format('/usr/iop/current/titan-client/lib/jts-1.13.jar')
-solr_jar_trg_file =  format('/usr/iop/current/solr-server/server/solr-webapp/webapp/WEB-INF/lib/jts-1.13.jar')
-solr_conf_trg_file = format('/usr/iop/current/solr-server/server/solr/configsets/{titan_solr_configset}/solrconfig.xml')
-
-import functools
-#create partial functions with common arguments for every HdfsResource call
-#to create an HDFS directory we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs
-)
-

+ 0 - 91
ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/service_check.py

@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-
-from resource_management import *
-from resource_management.libraries.functions.validate import call_and_match_output
-import subprocess
-import time
-
-class SolrServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    command = "curl"
-    httpGssnegotiate = "--negotiate"
-    userpswd = "-u:"
-    insecure = "-k"
-    silent = "-s"
-    out = "-o /dev/null"
-    head = "-w'%{http_code}'"
-    url = "http://" + params.solr_server_host + ":" + str(params.solr_port) + "/solr/"
-    url_server_check = url + '#/'
-
-    command_with_flags = [command, silent, out, head, httpGssnegotiate, userpswd, insecure, url_server_check]
-
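-    # poll the Solr web UI up to 10 times, 5 seconds apart, until it responds with HTTP 200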
-    is_running = False
-    for i in range(1,11):
-      proc = subprocess.Popen(command_with_flags, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-      Logger.info("Try %d, command: %s" % (i, " ".join(command_with_flags)))
-      (stdout, stderr) = proc.communicate()
-      response = stdout
-      if '200' in response:
-        is_running = True
-        Logger.info('Solr Server up and running')
-        break
-      Logger.info("Response: %s" % str(response))
-      time.sleep(5)
-
-    if is_running == False :
-      Logger.info('Solr Server not running.')
-      raise ComponentIsNotRunning()
-
-    if params.security_enabled:
-        kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
-        Execute(kinit_cmd,
-                user = params.smokeuser,
-                logoutput = True
-        )
-
-    create_collection_cmd = format("SOLR_INCLUDE={solr_conf_dir}/solr.in.sh solr create -c smokeuser_ExampleCollection -s 2 -d data_driven_schema_configs")
-    create_collection_output = "success"
-    create_collection_exists_output = "Collection 'smokeuser_ExampleCollection' already exists!"
-
-    Logger.info("Creating solr collection from example: %s" % create_collection_cmd)
-    call_and_match_output(create_collection_cmd, format("({create_collection_output})|({create_collection_exists_output})"), "Failed to create collection")
-
-    list_collection_cmd = "curl " + url + "admin/collections?action=list"
-    list_collection_output = "<str>smokeuser_ExampleCollection</str>"
-    Logger.info("List Collections: %s" % list_collection_cmd)
-    call_and_match_output(list_collection_cmd, format("({list_collection_output})"), "Failed to find collection \"smokeuser_ExampleCollection\" when listing collections")
-
-    delete_collection_cmd = format("SOLR_INCLUDE={solr_conf_dir}/solr.in.sh solr delete -c smokeuser_ExampleCollection")
-
-    Logger.info("Deleting solr collection : %s" % delete_collection_cmd)
-
-    Execute(delete_collection_cmd,
-      user = params.solr_user,
-      logoutput=True
-    )
-
-
-if __name__ == "__main__":
-  SolrServiceCheck().execute()

+ 0 - 95
ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/solr.py

@@ -1,95 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-import os
-
-def solr(type = None, upgrade_type=None):
-  import params
-
-  if type == 'server':
-    effective_version = params.iop_stack_version if upgrade_type is None else format_hdp_stack_version(params.version)
-
-    params.HdfsResource(params.solr_hdfs_home_dir,
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.solr_user,
-                         mode=params.solr_hdfs_user_mode
-    )
-    params.HdfsResource(None, action="execute")
-
-    Directory([params.log_dir,params.pid_dir,params.solr_conf_dir,params.solr_data_dir],
-              mode=0755,
-              cd_access='a',
-              owner=params.solr_user,
-              recursive=True,
-              group=params.user_group
-      )
-
-    XmlConfig("solr-site.xml",
-              conf_dir=params.solr_conf_dir,
-              configurations=params.solr_site,
-              configuration_attributes=params.config['configuration_attributes']['solr-site'],
-              owner=params.solr_user,
-              group=params.user_group,
-              mode=0644
-    )
-
-    File(format("{solr_conf_dir}/solr.in.sh"),
-         content=InlineTemplate(params.solr_in_sh_template),
-         owner=params.solr_user,
-         group=params.user_group
-    )
-
-    File(format("{solr_conf_dir}/log4j.properties"),
-           mode=0644,
-           group=params.user_group,
-           owner=params.solr_user,
-           content=params.log4j_props
-    )
-
-    if effective_version is not None and effective_version != "" and compare_versions(effective_version, '4.2.0.0') >= 0:
-      File(format("{solr_data_dir}/solr.xml"),
-           mode=0644,
-           group=params.user_group,
-           owner=params.solr_user,
-           content=Template("solr.xml.j2")
-      )
-    else:
-      Directory(format("{solr_data_dir}/data"),
-           owner=params.solr_user,
-           recursive=True,
-           group=params.user_group
-      )
-
-      File(format("{solr_data_dir}/data/solr.xml"),
-           mode=0644,
-           group=params.user_group,
-           owner=params.solr_user,
-           content=Template("solr.xml.j2")
-      )
-
-    # solr-webapp is a temp dir; it needs to be owned by solr so that Solr can write temp files into it.
-    Directory(format("{solr_home}"),
-              owner=params.solr_user,
-              recursive=True,
-    )
-

+ 0 - 36
ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/solr_client.py

@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-class SolrClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    
-
-  def configure(self, env):
-    print 'Configure the solr client';
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  SolrClient().execute()

+ 0 - 107
ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/solr_server.py

@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import iop_select
-from solr_service import solr_service
-from solr import solr
-
-class SolrServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-
-  def configure(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    solr(type='server', upgrade_type=upgrade_type)
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '4.1.0.0') >= 0:
-      iop_select.select("solr-server", params.version)
-      conf_select.select(params.stack_name, "solr", params.version)
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    solr_service(action = 'start')
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    solr_service(action = 'stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.solr_pid_file)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"solr.hdfs.security.kerberos.enabled":"true"}
-      props_empty_check = ["solr.hdfs.security.kerberos.keytabfile",
-                           "solr.hdfs.security.kerberos.principal"]
-      props_read_check = ["solr.hdfs.security.kerberos.keytabfile"]
-      solr_site_props = build_expectations('solr-site', props_value_check, props_empty_check, props_read_check)
-
-      solr_expectations = {}
-      solr_expectations.update(solr_site_props)
-
-      security_params = get_params_from_filesystem(status_params.solr_conf_dir,
-                                                   {'solr-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params,solr_expectations)
-
-      if not result_issues: # If all validations passed successfully
-        try:
-          if 'solr-site' not in security_params \
-            or 'solr.hdfs.security.kerberos.keytabfile' not in security_params['solr-site'] \
-            or 'solr.hdfs.security.kerberos.principal' not in security_params['solr-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.solr_user,
-                                security_params['solr-site']['solr.hdfs.security.kerberos.keytabfile'],
-                                security_params['solr-site']['solr.hdfs.security.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-
-if __name__ == "__main__":
-  SolrServer().execute()

+ 0 - 71
ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/solr_service.py

@@ -1,71 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-from resource_management import *
-from resource_management.libraries.functions.validate import call_and_match_output
-
-def solr_service(action='start'):
-  import params
-  cmd = format("{solr_home}/bin/solr")
-
-  if action == 'start':
-
-    if params.security_enabled:
-      if params.solr_principal is None:
-        solr_principal_with_host = 'missing_principal'
-      else:
-        solr_principal_with_host = params.solr_principal.replace("_HOST", params.hostname)
-      kinit_cmd = format("{kinit_path_local} -kt {solr_keytab} {solr_principal_with_host};")
-      Execute(kinit_cmd,user=params.solr_user)
-
-    Execute ('echo "Creating znode" ' + params.zookeeper_chroot)
-    Execute (params.cloud_scripts + '/zkcli.sh -zkhost ' + params.zookeeper_hosts_list + ' -cmd makepath ' + params.zookeeper_chroot, user=params.solr_user, ignore_failures=True )
-
-    # copy titan directory and jar for titan and solr integration
-    if (('titan-env' in params.configuration_tags) and not (os.path.exists(params.solr_conf_trg_file))):
-            Execute(("cp", "-r", params.titan_solr_conf_dir, params.solr_conf_trg_dir), sudo = True)
-            Execute(("cp", params.titan_solr_jar_file, params.solr_jar_trg_file), sudo = True)
-            Execute(("chmod", "644", params.solr_jar_trg_file), sudo=True)
-            Execute(("mv", params.solr_solr_conf_dir, params.solr_titan_conf_dir), sudo = True)
-
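-    # start Solr in SolrCloud mode (-c) with verbose output (-V); skip if the PID file already points to a live process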
-    daemon_cmd = format("SOLR_INCLUDE={solr_conf_dir}/solr.in.sh {cmd} start -c -V")
-    no_op_test = format("ls {solr_pid_file} >/dev/null 2>&1 && ps `cat {solr_pid_file}` >/dev/null 2>&1")
-    Execute(daemon_cmd,
-            not_if=no_op_test,
-            user=params.solr_user
-    )
-
-    # create collection for titan and solr integration
-    if (('titan-env' in params.configuration_tags) and (os.path.exists(params.solr_conf_trg_file))):
-        create_collection_cmd = format("SOLR_INCLUDE={solr_conf_dir}/solr.in.sh solr create -c {titan_solr_configset} -s 2 -d {titan_solr_configset}")
-        create_collection_output = "success"
-        create_collection_exists_output = format("Collection '{titan_solr_configset}' already exists!")
-        call_and_match_output(create_collection_cmd, format("({create_collection_output})|({create_collection_exists_output})"), "Failed to create collection")
-
-  elif action == 'stop':
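-    # stop all running Solr instances only if "solr status" still reports a process, then remove the PID file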
-    daemon_cmd = format("export SOLR_PID_DIR=" + params.pid_dir + "; SOLR_INCLUDE={solr_conf_dir}/solr.in.sh {cmd} stop -all")
-    no_op_test = format("! ((`SOLR_INCLUDE={solr_conf_dir}/solr.in.sh {cmd} status |grep process |wc -l`))")
-    rm_pid = format("rm -f {solr_pid_file}")
-    Execute(daemon_cmd,
-            not_if=no_op_test,
-            user=params.solr_user
-    )
-    Execute(rm_pid)

+ 0 - 135
ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/solr_upgrade.py

@@ -1,135 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-from resource_management import *
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import get_unique_id_and_date
-
-class SolrServerUpgrade(Script):
-  def pre_upgrade_conf41(self, env):
-    """
-    Creates the /etc/solr/4.1.0.0/0 directory and copies the Solr config files there.
-    Creates symlinks accordingly.
-
-    conf-select create-conf-dir --package solr --stack-version 4.1.0.0 --conf-version 0
-    cp -r /usr/iop/4.1.0.0/solr/conf/* /etc/solr/4.1.0.0/0/.
-    unlink or rm -r /usr/iop/4.1.0.0/solr/conf
-    ln -s /etc/solr/4.1.0.0/0 /usr/iop/4.1.0.0/solr/conf
-    conf-select set-conf-dir --package solr --stack-version 4.1.0.0 --conf-version 0
-    """
-    import params
-    env.set_params(params)
-
-    solr41_conf_dir="/usr/iop/4.1.0.0/solr/conf"
-    solr41_etc_dir="/etc/solr/4.1.0.0/0"
-    if not os.path.exists(solr41_etc_dir):
-      conf_select.create(params.stack_name, "solr", "4.1.0.0")
-
-    content_path=solr41_conf_dir
-    if not os.path.isfile("/usr/iop/4.1.0.0/solr/conf/solr.in.sh"):
-      content_path = "/etc/solr/conf.backup"
-
-    for each in os.listdir(content_path):
-      File(os.path.join(solr41_etc_dir, each),
-           owner=params.solr_user,
-           content = StaticFile(os.path.join(content_path, each)))
-
-    if not os.path.islink(solr41_conf_dir):
-      Directory(solr41_conf_dir,
-                action="delete",
-                recursive=True)
-
-    if os.path.islink(solr41_conf_dir):
-      os.unlink(solr41_conf_dir)
-
-    if not os.path.islink(solr41_conf_dir):
-      Link(solr41_conf_dir,
-           to=solr41_etc_dir
-      )
-
-    conf_select.select(params.stack_name, "solr", "4.1.0.0")
-
-  def pre_stop_backup_cores(self, env):
-    """
-    Backs up the Solr cores under Solr's home directory.
-    cp -r /var/lib/solr/data/* /tmp/solr/cores
-    """
-    import params
-    env.set_params(params)
-
-    if compare_versions(format_hdp_stack_version(params.current_version), '4.2.0.0') >= 0:
-      solr_home_dir=params.solr_data_dir
-    else: #4.1.0.0
-      solr_home_dir=params.old_lib_dir + "/data"
-
-    unique = get_unique_id_and_date()
-    backup_solr_dir="/tmp/upgrades/{0}/solr_{1}".format(params.current_version, unique)
-    backup_solr_cores="/tmp/solr/cores"
-
-    if os.path.isdir(solr_home_dir) and not os.path.isdir(backup_solr_dir):
-      os.makedirs(backup_solr_dir)
-      Execute(('cp', '-r', solr_home_dir+"/.", backup_solr_dir),
-              sudo=True
-      )
-
-    if params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE:
-      Directory(backup_solr_cores,
-                action="delete",
-                recursive=True)
-
-      Directory(backup_solr_cores,
-                mode=0755,
-                cd_access='a',
-                owner=params.solr_user,
-                recursive=True,
-                group=params.user_group
-      )
-
-      Execute(('cp', '-r', solr_home_dir+"/.", backup_solr_cores),
-              user=params.solr_user
-      )
-
-  def pre_start_migrate_cores(self, env):
-    """
-    Copies the Solr cores from the previous version to the new Solr home directory if solr_home is a different directory.
-    cp -r /tmp/solr/cores/* /opt/solr/data/.
-    """
-    import params
-    env.set_params(params)
-
-    if params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE:
-      backup_solr_cores="/tmp/solr/cores"
-      solr_home_dir=params.solr_data_dir
-
-      Directory(format(solr_home_dir),
-                owner=params.solr_user,
-                recursive=True,
-                group=params.user_group
-      )
-
-      if os.path.isdir(solr_home_dir) and os.path.isdir(backup_solr_cores):
-        Execute(('cp', '-rn', backup_solr_cores+"/.", solr_home_dir),
-                 user=params.solr_user,
-                 logoutput=True
-        )
-
-if __name__ == "__main__":
-  SolrServerUpgrade().execute()

+ 0 - 32
ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/scripts/status_params.py

@@ -1,32 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-
-config = Script.get_config()
-
-solr_user = config['configurations']['solr-env']['solr_user']
-hostname = config['hostname']
-kinit_path_local = functions.get_kinit_path()
-tmp_dir = Script.get_tmp_dir()
-solr_pid_dir = config['configurations']['solr-env']['solr_pid_dir']
-solr_port = config['configurations']['solr-env']['solr_port']
-solr_pid_file = format("{solr_pid_dir}/solr-{solr_port}.pid")

+ 0 - 51
ambari-server/src/main/resources/common-services/SOLR/5.5.0.4.2/package/templates/solr.xml.j2

@@ -1,51 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!--
-   This is an example of a simple "solr.xml" file for configuring one or 
-   more Solr Cores, as well as allowing Cores to be added, removed, and 
-   reloaded via HTTP requests.
-
-   More information about options available in this configuration file, 
-   and Solr Core administration can be found online:
-   http://wiki.apache.org/solr/CoreAdmin
--->
-
-<solr>
-
-  <solrcloud>
-
-    <str name="host">${host:}</str>
-    <int name="hostPort">${jetty.port:8983}</int>
-    <str name="hostContext">${hostContext:solr}</str>
-
-    <bool name="genericCoreNodeNames">${genericCoreNodeNames:true}</bool>
-
-    <int name="zkClientTimeout">${zkClientTimeout:30000}</int>
-    <int name="distribUpdateSoTimeout">${distribUpdateSoTimeout:600000}</int>
-    <int name="distribUpdateConnTimeout">${distribUpdateConnTimeout:60000}</int>
-
-  </solrcloud>
-
-  <shardHandlerFactory name="shardHandlerFactory"
-    class="HttpShardHandlerFactory">
-    <int name="socketTimeout">${socketTimeout:600000}</int>
-    <int name="connTimeout">${connTimeout:60000}</int>
-  </shardHandlerFactory>
-
-</solr>

+ 0 - 182
ambari-server/src/main/resources/stacks/BigInsights/4.0/blueprints/multinode-default.json

@@ -1,182 +0,0 @@
-{
-    "configurations" : [
-        {
-            "nagios-env" : {
-                "nagios_contact" : "admin@localhost"
-            }
-        }
-    ],
-    "host_groups" : [
-        {
-            "name" : "master_1",
-            "components" : [
-                {
-                    "name" : "NAMENODE"
-                },
-                {
-                    "name" : "ZOOKEEPER_SERVER"
-                },
-                {
-                    "name" : "HBASE_MASTER"
-                },
-                {
-                    "name" : "GANGLIA_SERVER"
-                },
-                {
-                    "name" : "HDFS_CLIENT"
-                },
-                {
-                    "name" : "YARN_CLIENT"
-                },
-                {
-                    "name" : "HCAT"
-                },
-                {
-                    "name" : "GANGLIA_MONITOR"
-                }
-            ],
-            "cardinality" : "1"
-        },
-        {
-            "name" : "master_2",
-            "components" : [
-
-                {
-                    "name" : "ZOOKEEPER_CLIENT"
-                },
-                {
-                    "name" : "HISTORYSERVER"
-                },
-                {
-                    "name" : "HIVE_SERVER"
-                },
-                {
-                    "name" : "SECONDARY_NAMENODE"
-                },
-                {
-                    "name" : "HIVE_METASTORE"
-                },
-                {
-                    "name" : "HDFS_CLIENT"
-                },
-                {
-                    "name" : "HIVE_CLIENT"
-                },
-                {
-                    "name" : "YARN_CLIENT"
-                },
-                {
-                    "name" : "MYSQL_SERVER"
-                },
-                {
-                    "name" : "POSTGRESQL_SERVER"
-                },
-                {
-                    "name" : "GANGLIA_MONITOR"
-                },
-                {
-                    "name" : "WEBHCAT_SERVER"
-                }
-            ],
-            "cardinality" : "1"
-        },
-        {
-            "name" : "master_3",
-            "components" : [
-                {
-                    "name" : "RESOURCEMANAGER"
-                },
-                {
-                    "name" : "ZOOKEEPER_SERVER"
-                },
-                {
-                    "name" : "GANGLIA_MONITOR"
-                }
-            ],
-            "cardinality" : "1"
-        },
-        {
-            "name" : "master_4",
-            "components" : [
-                {
-                    "name" : "OOZIE_SERVER"
-                },
-                {
-                    "name" : "ZOOKEEPER_SERVER"
-                },
-                {
-                    "name" : "GANGLIA_MONITOR"
-                }
-            ],
-            "cardinality" : "1"
-        },
-        {
-            "name" : "slave",
-            "components" : [
-                {
-                    "name" : "HBASE_REGIONSERVER"
-                },
-                {
-                    "name" : "NODEMANAGER"
-                },
-                {
-                    "name" : "DATANODE"
-                },
-                {
-                    "name" : "GANGLIA_MONITOR"
-                }
-            ],
-            "cardinality" : "${slavesCount}"
-        },
-        {
-            "name" : "gateway",
-            "components" : [
-                {
-                    "name" : "AMBARI_SERVER"
-                },
-                {
-                    "name" : "NAGIOS_SERVER"
-                },
-                {
-                    "name" : "ZOOKEEPER_CLIENT"
-                },
-                {
-                    "name" : "PIG"
-                },
-                {
-                    "name" : "OOZIE_CLIENT"
-                },
-                {
-                    "name" : "HBASE_CLIENT"
-                },
-                {
-                    "name" : "HCAT"
-                },
-                {
-                    "name" : "SQOOP"
-                },
-                {
-                    "name" : "HDFS_CLIENT"
-                },
-                {
-                    "name" : "HIVE_CLIENT"
-                },
-                {
-                    "name" : "YARN_CLIENT"
-                },
-                {
-                    "name" : "MAPREDUCE2_CLIENT"
-                },
-                {
-                    "name" : "GANGLIA_MONITOR"
-                }
-            ],
-            "cardinality" : "1"
-        }
-    ],
-    "Blueprints" : {
-        "blueprint_name" : "blueprint-multinode-default",
-        "stack_name" : "BigInsights",
-        "stack_version" : "4.0"
-    }
-}
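
The blueprint above is only a description; it takes effect once it is registered with the Ambari server and paired with a cluster-creation template that maps real hosts to master_1 .. master_4, slave and gateway (and supplies a value for the ${slavesCount} placeholder). A hedged sketch of the registration call, with server address, credentials and file name as placeholders:

    import json
    import requests

    AMBARI = "http://ambari-server.example.com:8080/api/v1"
    AUTH = ("admin", "admin")
    HEADERS = {"X-Requested-By": "ambari"}

    with open("multinode-default.json") as fp:
        blueprint = json.load(fp)

    # Register under the blueprint_name declared in the Blueprints section.
    requests.post(AMBARI + "/blueprints/blueprint-multinode-default",
                  auth=AUTH, headers=HEADERS, data=json.dumps(blueprint))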

+ 0 - 133
ambari-server/src/main/resources/stacks/BigInsights/4.0/blueprints/singlenode-default.json

@@ -1,133 +0,0 @@
-{
-    "configurations" : [
-        {
-            "nagios-env" : {
-                "nagios_contact" : "admin@localhost"
-            }
-        }
-    ],
-    "host_groups" : [
-        {
-            "name" : "host_group_1",
-            "components" : [
-                {
-                    "name" : "STORM_REST_API"
-                },
-                {
-                    "name" : "PIG"
-                },
-                {
-                    "name" : "HISTORYSERVER"
-                },
-                {
-                    "name" : "HBASE_REGIONSERVER"
-                },
-                {
-                    "name" : "OOZIE_CLIENT"
-                },
-                {
-                    "name" : "HBASE_CLIENT"
-                },
-                {
-                    "name" : "NAMENODE"
-                },
-                {
-                    "name" : "SUPERVISOR"
-                },
-                {
-                    "name" : "FALCON_SERVER"
-                },
-                {
-                    "name" : "HCAT"
-                },
-                {
-                    "name" : "AMBARI_SERVER"
-                },
-                {
-                    "name" : "APP_TIMELINE_SERVER"
-                },
-                {
-                    "name" : "HDFS_CLIENT"
-                },
-                {
-                    "name" : "HIVE_CLIENT"
-                },
-                {
-                    "name" : "NODEMANAGER"
-                },
-                {
-                    "name" : "DATANODE"
-                },
-                {
-                    "name" : "WEBHCAT_SERVER"
-                },
-                {
-                    "name" : "RESOURCEMANAGER"
-                },
-                {
-                    "name" : "ZOOKEEPER_SERVER"
-                },
-                {
-                    "name" : "ZOOKEEPER_CLIENT"
-                },
-                {
-                    "name" : "STORM_UI_SERVER"
-                },
-                {
-                    "name" : "HBASE_MASTER"
-                },
-                {
-                    "name" : "HIVE_SERVER"
-                },
-                {
-                    "name" : "OOZIE_SERVER"
-                },
-                {
-                    "name" : "FALCON_CLIENT"
-                },
-                {
-                    "name" : "NAGIOS_SERVER"
-                },
-                {
-                    "name" : "SECONDARY_NAMENODE"
-                },
-                {
-                    "name" : "HIVE_METASTORE"
-                },
-                {
-                    "name" : "GANGLIA_SERVER"
-                },
-                {
-                    "name" : "SQOOP"
-                },
-                {
-                    "name" : "YARN_CLIENT"
-                },
-                {
-                    "name" : "MAPREDUCE2_CLIENT"
-                },
-                {
-                    "name" : "MYSQL_SERVER"
-                },
-                {
-                    "name" : "POSTGRESQL_SERVER"
-                },
-                {
-                    "name" : "GANGLIA_MONITOR"
-                },
-                {
-                    "name" : "DRPC_SERVER"
-                },
-                {
-                    "name" : "NIMBUS"
-                }
-            ],
-            "cardinality" : "1"
-        }
-    ],
-    "Blueprints" : {
-        "blueprint_name" : "blueprint-singlenode-default",
-        "stack_name" : "BigInsights",
-        "stack_version" : "4.0"
-    }
-}

+ 0 - 268
ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml

@@ -1,268 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-    <property>
-        <name>security_enabled</name>
-        <value>false</value>
-        <description>Hadoop Security</description>
-    </property>
-    <property>
-        <name>kerberos_domain</name>
-        <value>EXAMPLE.COM</value>
-        <description>Kerberos realm.</description>
-    </property>
-    <property>
-        <name>ignore_groupsusers_create</name>
-        <display-name>Skip group modifications during install</display-name>
-        <value>false</value>
-        <description>Whether to ignore failures on users and group creation</description>
-        <property-type>ADDITIONAL_USER_PROPERTY</property-type>
-        <value-attributes>
-            <overridable>false</overridable>
-            <type>boolean</type>
-        </value-attributes>
-    </property>
-    <property>
-        <name>smokeuser</name>
-        <display-name>Smoke User</display-name>
-        <value>ambari-qa</value>
-        <property-type>USER</property-type>
-        <description>User executing service checks</description>
-    </property>
-    <property>
-        <name>smokeuser_keytab</name>
-        <value>/etc/security/keytabs/smokeuser.headless.keytab</value>
-        <description>Path to smoke test user keytab file</description>
-    </property>
-    <property>
-        <name>user_group</name>
-        <display-name>Hadoop Group</display-name>
-        <value>hadoop</value>
-        <property-type>GROUP</property-type>
-        <description>Hadoop user group.</description>
-    </property>
-
-  <!-- The properties that end in tar_source describe the pattern of where the tar.gz files come from.
-  They will replace {{ iop_stack_version }} with the "#.#.#.#" value followed by -* (which is the build number in HDP 2.2).
-  When copying those tarballs, Ambari will look up the corresponding tar_destination_folder property to know where it
-  should be copied to.
-  All of the destination folders must begin with hdfs://
-  Please note that the spaces inside of {{ ... }} are important.
-
-  IMPORTANT: Any properties included here must also be declared in site_properties.js
-
-  -->
-  <!-- Tez tarball is needed by Hive Server when using the Tez execution engine. -->
-  <!-- <property>
-    <name>tez_tar_source</name>
-    <value>/usr/iop/current/tez-client/lib/tez.tar.gz</value>
-    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
-  </property>
-  <property>
-    <name>tez_tar_destination_folder</name>
-    <value>hdfs:///iop/apps/{{ stack_version }}/tez/</value>
-    <description>Destination HDFS folder for the file.</description>
-  </property>  -->
-
-  <!-- Hive tarball is needed by WebHCat. -->
-  <property>
-    <name>hive_tar_source</name>
-    <value>/usr/iop/current/hive-client/hive.tar.gz</value>
-    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
-  </property>
-  <property>
-    <name>hive_tar_destination_folder</name>
-    <value>hdfs:///iop/apps/{{ stack_version }}/hive/</value>
-    <description>Destination HDFS folder for the file.</description>
-  </property>
-
-  <!-- Pig tarball is needed by WebHCat. -->
-  <property>
-    <name>pig_tar_source</name>
-    <value>/usr/iop/current/pig-client/pig.tar.gz</value>
-    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
-  </property>
-  <property>
-    <name>pig_tar_destination_folder</name>
-    <value>hdfs:///iop/apps/{{ stack_version }}/pig/</value>
-    <description>Destination HDFS folder for the file.</description>
-  </property>
-
-  <!-- Hadoop Streaming jar is needed by WebHCat. -->
-  <property>
-    <name>hadoop-streaming_tar_source</name>
-    <value>/usr/iop/current/hadoop-mapreduce-client/hadoop-streaming.jar</value>
-    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
-  </property>
-  <property>
-    <name>hadoop-streaming_tar_destination_folder</name>
-    <value>hdfs:///iop/apps/{{ stack_version }}/mapreduce/</value>
-    <description>Destination HDFS folder for the file.</description>
-  </property>
-
-  <!-- Sqoop tarball is needed by WebHCat. -->
-  <property>
-    <name>sqoop_tar_source</name>
-    <value>/usr/iop/current/sqoop-client/sqoop.tar.gz</value>
-    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
-  </property>
-  <property>
-    <name>sqoop_tar_destination_folder</name>
-    <value>hdfs:///iop/apps/{{ stack_version }}/sqoop/</value>
-    <description>Destination HDFS folder for the file.</description>
-  </property>
-
-  <!-- MapReduce2 tarball -->
-  <property>
-    <name>mapreduce_tar_source</name>
-    <value>/usr/iop/current/hadoop-client/mapreduce.tar.gz</value>
-    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
-  </property>
-  <property>
-    <name>mapreduce_tar_destination_folder</name>
-    <value>hdfs:///iop/apps/{{ stack_version }}/mapreduce/</value>
-    <description>Destination HDFS folder for the file.</description>
-  </property>
-  
-  <property>
-    <name>repo_suse_rhel_template</name>
-    <value>[{{repo_id}}]
-name={{repo_id}}
-{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}
-
-path=/
-enabled=1
-gpgcheck=0</value>
-    <description>Template of repositories for rhel and suse.</description>
-  </property>
-  <property>
-    <name>repo_ubuntu_template</name>
-    <value>{{package_type}} {{base_url}} {{components}}</value>
-    <description>Template of repositories for ubuntu.</description>
-  </property>
-  
-  <property>
-    <name>override_uid</name>
-    <display-name>Have Ambari manage UIDs</display-name>
-    <value>true</value>
-    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
-    <description>Have Ambari manage UIDs</description>
-    <value-attributes>
-        <overridable>false</overridable>
-        <type>boolean</type>
-    </value-attributes>
-  </property>
-  
-  <property>
-    <name>fetch_nonlocal_groups</name>
-    <value>true</value>
-    <display-name>Ambari fetch nonlocal groups</display-name>
-    <description>Ambari requires fetching all the groups. This can be slow
-        on environments with LDAP enabled. Setting this option to false allows Ambari
-        to skip user/group management for LDAP groups.</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  
-  <property>
-    <name>managed_hdfs_resource_property_names</name>
-    <value/>
-    <description>Comma separated list of property names with HDFS resource paths.
-        Resource from this list will be managed even if it is marked as not managed in the stack</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  
-  <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
-  <property>
-    <name>stack_tools</name>
-    <value/>
-    <description>Stack specific tools</description>
-    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
-    <value-attributes>
-      <property-file-name>stack_tools.json</property-file-name>
-      <property-file-type>json</property-file-type>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- Define stack_features property in the base stack. DO NOT override this property for each stack version -->
-  <property>
-    <name>stack_features</name>
-    <value/>
-    <description>List of features supported by the stack</description>
-    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
-    <value-attributes>
-      <property-file-name>stack_features.json</property-file-name>
-      <property-file-type>json</property-file-type>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>stack_root</name>
-    <value>/usr/iop</value>
-    <description>Stack root folder</description>
-    <value-attributes>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>alerts_repeat_tolerance</name>
-    <value>1</value>
-    <description>The number of consecutive alerts required to transition an alert from the SOFT to the HARD state.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ignore_bad_mounts</name>
-    <value>false</value>
-    <description>For properties handled by handle_mounted_dirs this will prevent Ambari from creating any directories.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>create_dirs_on_root</name>
-    <value>true</value>
-    <description>For properties handled by handle_mounted_dirs this will make Ambari create non-existent unknown directories on the / partition.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>one_dir_per_partition</name>
-    <value>true</value>
-    <description>For properties handled by handle_mounted_dirs this will make Ambari allow only one directory per partition.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  
-</configuration>
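
The *_tar_source / *_tar_destination_folder pairs above follow one pattern: at runtime the stack version is substituted into the destination path and the tarball is copied into HDFS before the services that need it (WebHCat, Hive, MapReduce) start. A minimal sketch of just that substitution step, with an example version; the actual copy is done by Ambari's tarball-copy helpers, not by this snippet:

    hive_tar_source = "/usr/iop/current/hive-client/hive.tar.gz"
    hive_tar_destination_folder = "hdfs:///iop/apps/{{ stack_version }}/hive/"
    stack_version = "4.0.0.0"   # illustrative build of the installed stack

    destination = hive_tar_destination_folder.replace("{{ stack_version }}", stack_version)
    # destination == "hdfs:///iop/apps/4.0.0.0/hive/"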

+ 0 - 38
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/hook.py

@@ -1,38 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script.hook import Hook
-from shared_initialization import link_configs
-from shared_initialization import setup_config
-from shared_initialization import setup_iop_install_directory
-from resource_management.libraries.script import Script
-
-class AfterInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_iop_install_directory()
-    setup_config()
-
-    link_configs(self.stroutfile)
-
-if __name__ == "__main__":
-  AfterInstallHook().execute()

+ 0 - 88
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/params.py

@@ -1,88 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.version import format_stack_version
-
-from resource_management.core.system import System
-from ambari_commons.os_check import OSCheck
-
-
-config = Script.get_config()
-sudo = AMBARI_SUDO_BINARY
-
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-iop_stack_version = format_stack_version(stack_version_unformatted)
-
-# default hadoop params
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-
-# IOP 4.0+ params
-if Script.is_stack_greater_or_equal("4.0"):
-  mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
-  # not supported in IOP 4.0+
-  hadoop_conf_empty_dir = None
-
-versioned_iop_root = '/usr/iop/current'
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#java params
-java_home = config['hostLevelParams']['java_home']
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#users and groups
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['cluster-env']['user_group']
-
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-has_namenode = not len(namenode_host) == 0
-
-if has_namenode:
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)

+ 0 - 89
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/shared_initialization.py

@@ -1,89 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-import shutil
-
-import ambari_simplejson as json
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute
-from resource_management.core.shell import as_sudo
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.libraries.script import Script
-
-def setup_iop_install_directory():
-  # This is a name of marker file.
-  SELECT_ALL_PERFORMED_MARKER = "/var/lib/ambari-agent/data/iop-select-set-all.performed"
-
-  import params
-  if params.iop_stack_version != "" and compare_versions(params.iop_stack_version, '4.0') >= 0:
-    Execute(as_sudo(['touch', SELECT_ALL_PERFORMED_MARKER]) + ' ; ' +
-                   format('{sudo} /usr/bin/iop-select set all `ambari-python-wrap /usr/bin/iop-select versions | grep ^{stack_version_unformatted} | tail -1`'),
-            only_if=format('ls -d /usr/iop/{stack_version_unformatted}*'),   # If any IOP version is installed
-            not_if=format("test -f {SELECT_ALL_PERFORMED_MARKER}")           # Do that only once (otherwise we break rolling upgrade logic)
-    )
-
-def setup_config():
-  import params
-  if params.has_namenode:
-    # create core-site only if the hadoop config directory exists
-    XmlConfig("core-site.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['core-site'],
-              configuration_attributes=params.config['configuration_attributes']['core-site'],
-              owner=params.hdfs_user,
-              group=params.user_group,
-              only_if=format("ls {hadoop_conf_dir}"))
-
-
-def load_version(struct_out_file):
-  """
-  Load version from file.  Made a separate method for testing
-  """
-  json_version = None
-  try:
-    if os.path.exists(struct_out_file):
-      with open(struct_out_file, 'r') as fp:
-        json_info = json.load(fp)
-        json_version = json_info['version']
-  except:
-    pass
-
-  return json_version
-  
-
-def link_configs(struct_out_file):
-  """
-  Links configs, only on a fresh install of BigInsights-4.1 and higher
-  """
-
-  if not Script.is_stack_greater_or_equal("4.1"):
-    Logger.info("Can only link configs for BigInsights-4.1 and higher.")
-    return
-
-  json_version = load_version(struct_out_file)
-
-  if not json_version:
-    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
-    return
-
-  for k, v in conf_select.get_package_dirs().iteritems():
-    conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
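
load_version() and link_configs() work off the structured-output file the agent writes for the install command: only when that file carries a concrete "version" does the hook turn /etc/<component>/conf into versioned symlinks. A runnable illustration of that handshake, with an example path and version string:

    import json

    example = "/tmp/structured-out-example.json"
    with open(example, "w") as fp:
        json.dump({"version": "4.1.0.0-123"}, fp)

    with open(example) as fp:
        print json.load(fp)["version"]    # -> 4.1.0.0-123, what load_version() returns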

+ 0 - 63
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/files/changeToSecureUid.sh

@@ -1,63 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-username=$1
-directories=$2
-newUid=$3
-
-function find_available_uid() {
- for ((i=1001; i<=2000; i++))
- do
-   grep -q $i /etc/passwd
-   if [ "$?" -ne 0 ]
-   then
-    newUid=$i
-    break
-   fi
- done
-}
-
-if [ -z $2 ]; then
-  test $(id -u ${username} 2>/dev/null)
-  if [ $? -ne 1 ]; then
-   newUid=`id -u ${username}`
-  else
-   find_available_uid 
-  fi
-  echo $newUid
-  exit 0
-fi
-
-if [ $newUid -eq 0 ]
-then
-  echo "Failed to find Uid between 1000 and 2000"
-  exit 1
-fi
-
-
-set -e
-dir_array=($(echo $directories | sed 's/,/\n/g'))
-old_uid=$(id -u $username)
-sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
-echo "Changing uid of $username from $old_uid to $newUid"
-echo "Changing directory permisions for ${dir_array[@]}"
-$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done
-exit 0
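
The script has two modes: called with only a username it prints a usable UID (the existing one, or the first free UID between 1001 and 2000), and called with a username, a comma-separated directory list and a UID it switches the account to that UID and re-owns the directories. A small Python sketch of the same free-UID scan, checking the uid field of each passwd entry rather than grepping whole lines as the shell version does:

    def find_available_uid(passwd_path="/etc/passwd"):
        # Collect the uid field (third column) of every passwd entry.
        with open(passwd_path) as fp:
            used = set(line.split(":")[2] for line in fp if line.count(":") >= 6)
        for uid in range(1001, 2001):
            if str(uid) not in used:
                return uid
        return 0   # mirrors the script's "no UID found" failure value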

+ 0 - 36
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/hook.py

@@ -1,36 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from shared_initialization import *
-
-class BeforeAnyHook(Hook):
-
-  def hook(self, env):
-    import params
-    env.set_params(params)
-    
-    setup_users()
-    if params.has_namenode:
-      setup_hadoop_env()
-    setup_java()
-
-if __name__ == "__main__":
-  BeforeAnyHook().execute()
-

+ 0 - 226
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/params.py

@@ -1,226 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import collections
-import re
-import os
-
-import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
-
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.version import compare_versions
-from ambari_commons.os_check import OSCheck
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None)
-java_home = config['hostLevelParams']['java_home']
-java_version = int(config['hostLevelParams']['java_version'])
-jdk_location = config['hostLevelParams']['jdk_location']
-
-sudo = AMBARI_SUDO_BINARY
-
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-iop_stack_version = format_stack_version(stack_version_unformatted)
-
-restart_type = default("/commandParams/restart_type", "")
-version = default("/commandParams/version", None)
-# Handle upgrade and downgrade
-if (restart_type.lower() == "rolling_upgrade" or restart_type.lower() == "nonrolling_upgrade") and version:
-  iop_stack_version = format_stack_version(version)
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-
-# Some datanode settings
-dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
-dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
-dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
-dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
-secure_dn_ports_are_in_use = False
-
-def get_port(address):
-  """
-  Extracts port from the address like 0.0.0.0:1019
-  """
-  if address is None:
-    return None
-  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
-  if m is not None:
-    return int(m.group(2))
-  else:
-    return None
-
-def is_secure_port(port):
-  """
-  Returns True if port is root-owned at *nix systems
-  """
-  if port is not None:
-    return port < 1024
-  else:
-    return False
-
-# hadoop default params
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-
-# upgrades would cause these directories to have a version instead of "current"
-# which would cause a lot of problems when writing out hadoop-env.sh; instead
-# force the use of "current" in the hook
-hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
-
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-hadoop_secure_dn_user = hdfs_user
-hadoop_dir = "/etc/hadoop"
-versioned_iop_root = '/usr/iop/current'
-hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
-datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
-is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
-
-
-if Script.is_stack_greater_or_equal("4.0"):
-  mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
-  
-  # not supported in IOP 4.0+
-  hadoop_conf_empty_dir = None
-
-  if not security_enabled:
-    hadoop_secure_dn_user = '""'
-  else:
-    dfs_dn_port = get_port(dfs_dn_addr)
-    dfs_dn_http_port = get_port(dfs_dn_http_addr)
-    dfs_dn_https_port = get_port(dfs_dn_https_addr)
-    # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
-    if dfs_http_policy == "HTTPS_ONLY":
-      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
-    elif dfs_http_policy == "HTTP_AND_HTTPS":
-      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
-    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
-      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
-    if secure_dn_ports_are_in_use:
-      hadoop_secure_dn_user = hdfs_user
-    else:
-      hadoop_secure_dn_user = '""'
-
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
-
-#users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = None; #config['configurations']['tez-env']["tez_user"]
-oozie_user = config['configurations']['oozie-env']["oozie_user"]
-falcon_user = config['configurations']['falcon-env']["falcon_user"]
-ranger_user = config['configurations']['ranger-env']["ranger_user"]
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-
-has_namenode = not len(namenode_host) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_tez = False #'tez-site' in config['configurations']
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_oozie_server = not len(oozie_servers) == 0
-has_falcon_server_hosts = not len(falcon_server_hosts) == 0
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-
-
-if has_namenode:
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
-
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-ranger_group = config['configurations']['ranger-env']['ranger_group']
-dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
-
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-
-smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-#repo params
-repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
-
-user_to_groups_dict = collections.defaultdict(lambda:[user_group])
-user_to_groups_dict[smoke_user] = [proxyuser_group]
-if has_ganglia_server:
-  user_to_groups_dict[gmond_user] = [gmond_user]
-  user_to_groups_dict[gmetad_user] = [gmetad_user]
-if has_tez:
-  user_to_groups_dict[tez_user] = [proxyuser_group]
-if has_oozie_server:
-  user_to_groups_dict[oozie_user] = [proxyuser_group]
-if has_falcon_server_hosts:
-  user_to_groups_dict[falcon_user] = [proxyuser_group]  
-if has_ranger_admin:
-  user_to_groups_dict[ranger_user] = [ranger_group]
-
-user_to_gid_dict = collections.defaultdict(lambda:user_group)
-
-user_list = json.loads(config['hostLevelParams']['user_list'])
-group_list = json.loads(config['hostLevelParams']['group_list'])
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
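
A worked example of the two helpers defined above, with illustrative addresses:

    # get_port("0.0.0.0:1019")                  -> 1019
    # get_port("https://dn.example.com:50475")  -> 50475
    # is_secure_port(1019)                      -> True   (below 1024, root-owned)
    # is_secure_port(50475)                     -> False
    #
    # With dfs.http.policy at HTTP_ONLY and dfs.datanode.address on port 1019,
    # secure_dn_ports_are_in_use becomes True, so hadoop_secure_dn_user stays
    # set to the hdfs user and the datanode can bind the privileged port via jsvc.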

+ 0 - 242
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/shared_initialization.py

@@ -1,242 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import re
-import getpass
-from copy import copy
-from resource_management.libraries.functions.version import compare_versions
-from resource_management import *
-
-
-def setup_users():
-  """
-  Creates users before cluster installation
-  """
-  import params
-
-  if not params.host_sys_prepped and not params.ignore_groupsusers_create:
-    for group in params.group_list:
-      Group(group,
-      )
-
-
-    for user in params.user_list:
-      if params.override_uid == "true":
-        User(user,
-            uid = get_uid(user),
-            gid = params.user_to_gid_dict[user],
-            groups = params.user_to_groups_dict[user],
-        )
-      else:
-        User(user,
-            gid = params.user_to_gid_dict[user],
-            groups = params.user_to_groups_dict[user],
-        )
-
-    if params.override_uid == "true":
-      set_uid(params.smoke_user, params.smoke_user_dirs)
-    else:
-      Logger.info('Skipping setting uid for smoke user as host is sys prepped')
-  else:
-    Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
-    pass
-
-
-  if params.has_hbase_masters:
-    Directory (params.hbase_tmp_dir,
-               owner = params.hbase_user,
-               mode=0775,
-               recursive = True,
-               cd_access="a",
-    )
-    if not params.host_sys_prepped and params.override_uid == "true":
-      set_uid(params.hbase_user, params.hbase_user_dirs)
-    else:
-      Logger.info('Skipping setting uid for hbase user as host is sys prepped')      
-      pass
-
-  if not params.host_sys_prepped:
-    if params.has_namenode:
-      create_dfs_cluster_admins()
-  else:
-    Logger.info('Skipping setting dfs cluster admin as host is sys prepped')
-
-
-def create_dfs_cluster_admins():
-  """
-  dfs.cluster.administrators support format <comma-delimited list of usernames><space><comma-delimited list of group names>
-  """
-  import params
-
-  groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
-
-  User(params.hdfs_user,
-    groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
-    ignore_failures = params.ignore_groupsusers_create
-  )
-def create_users_and_groups(user_and_groups):
-
-  import params
-
-  parts = re.split('\s', user_and_groups)
-  if len(parts) == 1:
-    parts.append("")
-
-  users_list = parts[0].split(",") if parts[0] else []
-  groups_list = parts[1].split(",") if parts[1] else []
-
-  if users_list:
-    User(users_list,
-         ignore_failures = params.ignore_groupsusers_create
-    )
-
-  if groups_list:
-    Group(copy(groups_list),
-          ignore_failures = params.ignore_groupsusers_create
-    )
-  return groups_list
-    
-def set_uid(user, user_dirs):
-  """
-  user_dirs - comma separated directories
-  """
-  import params
-
-  File(format("{tmp_dir}/changeUid.sh"),
-       content=StaticFile("changeToSecureUid.sh"),
-       mode=0555)
-  ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
-  uid = get_uid(user)
-  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} {uid}"),
-          not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
-
-def get_uid(user):
-  import params
-  import commands
-  user_str = str(user) + "_uid"
-  service_env = [ serviceEnv for serviceEnv in params.config['configurations'] if user_str in params.config['configurations'][serviceEnv]]
-  
-  if service_env and params.config['configurations'][service_env[0]][user_str]:
-    service_env_str = str(service_env[0])
-    uid = params.config['configurations'][service_env_str][user_str]
-    if len(service_env) > 1:
-      Logger.warning("Multiple values found for %s, using %s"  % (user_str, uid))
-    return uid 
-  else:
-    if user == params.smoke_user:
-      return 0
-    File(format("{tmp_dir}/changeUid.sh"),
-       content=StaticFile("changeToSecureUid.sh"),
-       mode=0555)
-    ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
-    newUid=commands.getoutput(format("{tmp_dir}/changeUid.sh {user}"))
-    return newUid
-    
-def setup_hadoop_env():
-  import params
-  if params.has_namenode:
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-
-    Directory(params.hadoop_dir, mode=0755)
-
-    # IOP < 4.0 used a conf -> conf.empty symlink for /etc/hadoop/
-    if Script.is_stack_less_than("4.0"):
-      Directory(params.hadoop_conf_empty_dir,
-              recursive=True,
-              owner='root',
-              group=params.user_group
-      )
-      Link(params.hadoop_conf_dir,
-         to=params.hadoop_conf_empty_dir,
-         not_if=format("ls {hadoop_conf_dir}")
-      )
-      
-    # write out hadoop-env.sh, but only if the directory exists
-    if os.path.exists(params.hadoop_conf_dir):
-      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'),
-         owner=tc_owner, group=params.user_group,
-         content=InlineTemplate(params.hadoop_env_sh_template)
-      )
-      
-    # Create tmp dir for java.io.tmpdir
-    # Handle a situation when /tmp is set to noexec
-    Directory(params.hadoop_java_io_tmpdir,
-              owner=params.hdfs_user,
-              group=params.user_group,
-              mode=0777
-    )
-  
-def setup_java():
-  """
-  Installs jdk using specific params, that comes from ambari-server
-  """
-  import params
-
-  java_exec = format("{java_home}/bin/java")
-
-  if not os.path.isfile(java_exec):
-
-    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
-    java_dir = os.path.dirname(params.java_home)
-    tmp_java_dir = format("{tmp_dir}/jdk")
-
-    if not params.jdk_name:
-      return
-
-    Directory(params.artifact_dir,
-              recursive = True,
-              )
-
-    File(jdk_curl_target,
-         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
-         not_if = format("test -f {jdk_curl_target}")
-    )
-
-    if params.jdk_name.endswith(".bin"):
-      chmod_cmd = ("chmod", "+x", jdk_curl_target)
-      install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
-    elif params.jdk_name.endswith(".gz"):
-      chmod_cmd = ("chmod","a+x", java_dir)
-      install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
-
-    Directory(java_dir
-    )
-
-    Execute(chmod_cmd,
-            sudo = True,
-            )
-
-    Execute(install_cmd,
-            )
-
-    File(format("{java_home}/bin/java"),
-         mode=0755,
-         cd_access="a",
-         )
-
-    Execute(("chgrp","-R", params.user_group, params.java_home),
-            sudo = True,
-            )
-    Execute(("chown","-R", getpass.getuser(), params.java_home),
-            sudo = True,
-            )
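
create_users_and_groups() accepts the dfs.cluster.administrators format of "<comma-delimited users> <comma-delimited groups>"; a worked example with illustrative names:

    import re

    value = "hdfs,dfsadmin ops,hadoop"        # dfs.cluster.administrators
    parts = re.split('\s', value)
    users_list = parts[0].split(",")          # ['hdfs', 'dfsadmin']
    groups_list = parts[1].split(",")         # ['ops', 'hadoop']
    # create_dfs_cluster_admins() then re-declares the hdfs user with its
    # default groups plus ['ops', 'hadoop'].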

+ 0 - 37
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/hook.py

@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-from repo_initialization import *
-
-class BeforeInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    self.run_custom_hook('before-ANY')
-    env.set_params(params)
-    
-    install_repos()
-    install_packages()
-
-if __name__ == "__main__":
-  BeforeInstallHook().execute()

+ 0 - 111
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/params.py

@@ -1,111 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.functions.version import format_stack_version, compare_versions
-from resource_management.core.system import System
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import default, format
-
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-sudo = AMBARI_SUDO_BINARY
-
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-iop_stack_version = format_stack_version(stack_version_unformatted)
-
-#users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = None #config['configurations']['tez-env']["tez_user"]
-
-user_group = config['configurations']['cluster-env']['user_group']
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-
-# repo templates
-repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_template']
-repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']
-
-#hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
-falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
-
-has_sqoop_client = 'sqoop-env' in config['configurations']
-has_namenode = not len(namenode_host) == 0
-has_hs = not len(hs_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_storm_server = not len(storm_server_hosts) == 0
-has_falcon_server = not len(falcon_host) == 0
-has_tez = False #'tez-site' in config['configurations']
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#java params
-java_home = config['hostLevelParams']['java_home']
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-jce_location = config['hostLevelParams']['jdk_location']
-jdk_location = config['hostLevelParams']['jdk_location']
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-#repo params
-repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
-

+ 0 - 91
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/repo_initialization.py

@@ -1,91 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management import *
-from ambari_commons.os_check import OSCheck
-import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
-
-# components_list = repoName + postfix
-_UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
-# Defect 110271, clear IOP*** repo cache
-#REPO_FILE_NAME_PREFIX = 'IOP-'
-#STACK_TO_ROOT_FOLDER = {"IOP": "/usr/iop", "BIGINSIGHTS":"/usr/iop"}
-
-def _alter_repo(action, repo_string, repo_template):
-  """
-  @param action: "delete" or "create"
-  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
-  """
-  repo_dicts = json.loads(repo_string)
-
-  if not isinstance(repo_dicts, list):
-    repo_dicts = [repo_dicts]
-
-  for repo in repo_dicts:
-    if not 'baseUrl' in repo:
-      repo['baseUrl'] = None
-    if not 'mirrorsList' in repo:
-      repo['mirrorsList'] = None
-    
-    ubuntu_components = [ repo['repoName'] ] + _UBUNTU_REPO_COMPONENTS_POSTFIX
-    
-    Repository(repo['repoId'],
-               action = action,
-               base_url = repo['baseUrl'],
-               mirror_list = repo['mirrorsList'],
-               repo_file_name = repo['repoName'],
-               repo_template = repo_template,
-               components = ubuntu_components, # ubuntu specific
-    )
-'''    # Defect 110271, clear IOP*** repo cache
-    if action == "create":
-      #Attempt to clean cache against the given repo
-      repo_id=repo['repoId']
-      print "Clean cache against " + repo_id + "; file:" + repo['repoName']
-      current_repo_ids = []
-      current_repo_files = set() 
-      
-      if OSCheck.is_ubuntu_family():
-        current_repo_files.add("base")
-        current_repo_files.add(repo['repoName'])
-      elif OSCheck.is_suse_family():
-        current_repo_ids.append("base")
-        current_repo_ids.append(repo_id)
-      else:  
-        current_repo_ids.append(repo_id)
-      
-      Repository(repo_id,
-                 action = "clearcache",
-                 base_url = repo['baseUrl'],
-                 mirror_list = repo['mirrorsList'],
-                 repo_file_name = repo['repoName'],
-                 repo_template = repo_template,
-                 components = ubuntu_components,  # ubuntu specific
-                 use_repos=list(current_repo_files) if OSCheck.is_ubuntu_family() else current_repo_ids,
-                 skip_repos=["*"] if OSCheck.is_redhat_family() else []
-      )      
-''' 
-def install_repos():
-  import params
-  if params.host_sys_prepped:
-    return
-
-  template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu
-  _alter_repo("create", params.repo_info, template)
-  if params.service_repo_info:
-    _alter_repo("create", params.service_repo_info, template)

+ 0 - 34
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/shared_initialization.py

@@ -1,34 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management import *
-
-
-def install_packages():
-  import params
-  if params.host_sys_prepped:
-    return
-
-  packages = ['unzip', 'curl']
-  if params.iop_stack_version != "" and compare_versions(params.iop_stack_version, '4.0') >= 0:
-    packages.append('iop-select')
-  Package(packages)
-

+ 0 - 29
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-RESTART/scripts/hook.py

@@ -1,29 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-class BeforeRestartHook(Hook):
-
-  def hook(self, env):
-    self.run_custom_hook('before-START')
-
-if __name__ == "__main__":
-  BeforeRestartHook().execute()
-

+ 0 - 65
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/checkForFormat.sh

@@ -1,65 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export bin_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
-  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
-    (( EXIT_CODE = $EXIT_CODE | $? ))
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-

BIN
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/fast-hdfs-resource.jar


+ 0 - 134
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/task-log4j.properties

@@ -1,134 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
- 
-# Removes "deprecated" messages
-log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

+ 0 - 66
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/topology_script.py

@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import sys, os
-from string import join
-import ConfigParser
-
-
-DEFAULT_RACK = "/default-rack"
-DATA_FILE_NAME =  os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data"
-SECTION_NAME = "network_topology"
-
-class TopologyScript():
-
-  def load_rack_map(self):
-    try:
-      #RACK_MAP contains both host name vs rack and ip vs rack mappings
-      mappings = ConfigParser.ConfigParser()
-      mappings.read(DATA_FILE_NAME)
-      return dict(mappings.items(SECTION_NAME))
-    except ConfigParser.NoSectionError:
-      return {}
-
-  def get_racks(self, rack_map, args):
-    if len(args) == 1:
-      return DEFAULT_RACK
-    else:
-      return join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]],)
-
-  def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map):
-    #try looking up by hostname
-    rack = rack_map.get(hostname_or_ip)
-    if rack is not None:
-      return rack
-    #try looking up by ip
-    rack = rack_map.get(self.extract_ip(hostname_or_ip))
-    #try by localhost since hadoop could be passing in 127.0.0.1 which might not be mapped
-    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)
-
-  #strips out port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010
-  def extract_ip(self, container_string):
-    return container_string.split("/")[0].split(":")[0]
-
-  def execute(self, args):
-    rack_map = self.load_rack_map()
-    rack = self.get_racks(rack_map, args)
-    print rack
-
-if __name__ == "__main__":
-  TopologyScript().execute(sys.argv)

+ 0 - 40
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/hook.py

@@ -1,40 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from rack_awareness import create_topology_script_and_mapping
-from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink
-
-
-class BeforeStartHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    self.run_custom_hook('before-ANY')
-    env.set_params(params)
-
-    setup_hadoop()
-    setup_configs()
-    create_javahome_symlink()
-    create_topology_script_and_mapping()	
-
-if __name__ == "__main__":
-  BeforeStartHook().execute()

+ 0 - 211
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/params.py

@@ -1,211 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_stack_version, compare_versions
-from ambari_commons.os_check import OSCheck
-from resource_management.libraries.script.script import Script
-
-config = Script.get_config()
-
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-iop_stack_version = format_stack_version(stack_version_unformatted)
-
-# hadoop default params
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_lib_home = stack_select.get_hadoop_dir("lib")
-hadoop_bin = stack_select.get_hadoop_dir("sbin")
-hadoop_home = '/usr'
-create_lib_snappy_symlinks = True
-
-# IOP 4.0+ params
-if Script.is_stack_greater_or_equal("4.0"):
-  mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
-  hadoop_home = stack_select.get_hadoop_dir("home")
-  create_lib_snappy_symlinks = False
-
-
-current_service = config['serviceName']
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#users and groups
-has_hadoop_env = 'hadoop-env' in config['configurations']
-mapred_user = config['configurations']['mapred-env']['mapred_user']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-yarn_user = config['configurations']['yarn-env']['yarn_user']
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-#hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
-
-has_namenode = not len(namenode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_metric_collector = not len(ams_collector_hosts) == 0
-
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-if has_metric_collector:
-  if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_host' in config['configurations']['cluster-env']:
-    metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
-  else:
-    metric_collector_host = ams_collector_hosts[0]
-  if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
-  else:
-    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
-    if metric_collector_web_address.find(':') != -1:
-      metric_collector_port = metric_collector_web_address.split(':')[1]
-    else:
-      metric_collector_port = '6188'
-  pass
-metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
-metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 60)
-
-#hadoop params
-
-if has_namenode:
-  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
-  task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
-  
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-
-
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
-#db params
-server_db_name = config['hostLevelParams']['db_name']
-db_driver_filename = config['hostLevelParams']['db_driver_filename']
-oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
-mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
-ambari_server_resources = config['hostLevelParams']['jdk_location']
-oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
-mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
-
-ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
-ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
-ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
-ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
-
-if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
-  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
-else:
-  rca_enabled = False
-rca_disabled_prefix = "###"
-if rca_enabled == True:
-  rca_prefix = ""
-else:
-  rca_prefix = rca_disabled_prefix
-
-#hadoop-env.sh
-java_home = config['hostLevelParams']['java_home']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#log4j.properties
-
-yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
-
-dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
-
-#log4j.properties
-if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
-  log4j_props = config['configurations']['hdfs-log4j']['content']
-  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
-    log4j_props += config['configurations']['yarn-log4j']['content']
-else:
-  log4j_props = None
-
-refresh_topology = False
-command_params = config["commandParams"] if "commandParams" in config else None
-if command_params is not None:
-  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
-  
-ambari_libs_dir = "/var/lib/ambari-agent/lib"
-is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-
-#host info
-all_hosts = default("/clusterHostInfo/all_hosts", [])
-all_racks = default("/clusterHostInfo/all_racks", [])
-all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-
-#topology files
-net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
-net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
-net_topology_mapping_data_file_name = 'topology_mappings.data'
-net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
-

+ 0 - 71
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/rack_awareness.py

@@ -1,71 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from resource_management.core.resources import File, Directory
-from resource_management.core.source import StaticFile, Template
-from resource_management.libraries.functions import format
-
-import os
-
-def create_topology_mapping():
-  import params
-  
-  path=params.net_topology_mapping_data_file_path 
-  parent_dir=os.path.dirname(path) 
-  # only create the parent directory and set its permission if it does not exist
-  if not os.path.exists(parent_dir): 
-    Directory(parent_dir, 
-              recursive=True, 
-              owner=params.hdfs_user, 
-              group=params.user_group) 
-
-  # placing the mappings file in the same folder where the topology script is located
-  File(path,
-       content=Template("topology_mappings.data.j2"),
-       owner=params.hdfs_user,
-       group=params.user_group,
-       # if there are no hadoop components, don't create the script
-       only_if=format("test -d {net_topology_script_dir}"),
-  )
-
-def create_topology_script():
-  import params
-
-  path=params.net_topology_script_file_path
-  parent_dir=os.path.dirname(path) 
-  # only create the parent directory and set its permission if it does not exist 
-  if not os.path.exists(parent_dir): 
-    Directory(parent_dir, 
-              recursive=True, 
-              owner=params.hdfs_user, 
-              group=params.user_group) 
-
-  # installing the topology script to the specified location
-  File(path,
-       content=StaticFile('topology_script.py'),
-       mode=0755,
-       only_if=format("test -d {net_topology_script_dir}"),
-  )
-
-  
-def create_topology_script_and_mapping():
-  import params
-  if params.has_hadoop_env:
-    create_topology_mapping()
-    create_topology_script()

+ 0 - 152
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/shared_initialization.py

@@ -1,152 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
-
-from resource_management import *
-
-def setup_hadoop():
-  """
-  Setup hadoop files and directories
-  """
-  import params
-
-  Execute(("setenforce","0"),
-          only_if="test -f /selinux/enforce",
-          not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)",
-          sudo=True,
-  )
-
-  #directories
-  if params.has_namenode:
-    Directory(params.hdfs_log_dir_prefix,
-              recursive=True,
-              owner='root',
-              group=params.user_group,
-              mode=0775,
-              cd_access='a',
-    )
-    Directory(params.hadoop_pid_dir_prefix,
-              recursive=True,
-              owner='root',
-              group='root',
-              cd_access='a',
-    )
-    Directory(params.hadoop_tmp_dir,
-              recursive=True,
-              owner=params.hdfs_user,
-              cd_access='a',
-              )
-  #files
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-      
-    # if WebHDFS is not enabled we need this jar to create hadoop folders.
-    if not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
-      # for the source code of the jar, go to contrib/fast-hdfs-resource
-      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
-           mode=0644,
-           content=StaticFile("fast-hdfs-resource.jar")
-      )
-      
-    if os.path.exists(params.hadoop_conf_dir):
-      File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
-           mode=0644,
-           group=params.user_group,
-           owner=tc_owner,
-           content=Template('commons-logging.properties.j2')
-      )
-
-      health_check_template_name = "health_check"
-      File(os.path.join(params.hadoop_conf_dir, health_check_template_name),
-           owner=tc_owner,
-           content=Template(health_check_template_name + ".j2")
-      )
-
-      log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
-      if (params.log4j_props != None):
-        File(log4j_filename,
-             mode=0644,
-             group=params.user_group,
-             owner=params.hdfs_user,
-             content=params.log4j_props
-        )
-      elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
-        File(log4j_filename,
-             mode=0644,
-             group=params.user_group,
-             owner=params.hdfs_user,
-        )
-
-      File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
-           owner=params.hdfs_user,
-           content=Template("hadoop-metrics2.properties.j2")
-      )
-
-
-def setup_configs():
-  """
-  Creates configs for services HDFS mapred
-  """
-  import params
-
-  if params.has_namenode:
-    if os.path.exists(params.hadoop_conf_dir):
-      File(params.task_log4j_properties_location,
-           content=StaticFile("task-log4j.properties"),
-           mode=0755
-      )
-
-    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
-      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
-           owner=params.hdfs_user,
-           group=params.user_group
-      )
-    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
-      File(os.path.join(params.hadoop_conf_dir, 'masters'),
-                owner=params.hdfs_user,
-                group=params.user_group
-      )
-
-  generate_include_file()
-
-
-def generate_include_file():
-  import params
-
-  if params.has_namenode and params.dfs_hosts and params.has_slaves:
-    include_hosts_list = params.slave_hosts
-    File(params.dfs_hosts,
-         content=Template("include_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-
-def create_javahome_symlink():
-  if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
-    Directory("/usr/jdk64/",
-         recursive=True,
-    )
-    Link("/usr/jdk/jdk1.6.0_31",
-         to="/usr/jdk64/jdk1.6.0_31",
-    )
-

+ 0 - 43
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/commons-logging.properties.j2

@@ -1,43 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#Logging Implementation
-
-#Log4J
-org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
-
-#JDK Logger
-#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

+ 0 - 21
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/exclude_hosts_list.j2

@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}

+ 0 - 88
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/hadoop-metrics2.properties.j2

@@ -1,88 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_ganglia_server %}
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
-datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
-jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
-tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
-maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
-reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
-resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
-nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
-historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
-journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
-nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
-supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-{% endif %}
-{% if has_metric_collector %}
-
-*.period={{metrics_collection_period}}
-*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
-*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-*.sink.timeline.period={{metrics_collection_period}}
-*.sink.timeline.sendInterval={{metrics_report_interval}}000
-*.sink.timeline.slave.host.name = {{hostname}}
-
-datanode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-namenode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-resourcemanager.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-nodemanager.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-historyserver.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-journalnode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-nimbus.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-supervisor.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-maptask.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-reducetask.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-
-resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
-
-{% endif %}

+ 0 - 81
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/health_check.j2

@@ -1,81 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-# Run all checks
-for check in disks ; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0

+ 0 - 21
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/include_hosts_list.j2

@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}

+ 0 - 24
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/topology_mappings.data.j2

@@ -1,24 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-[network_topology]
-{% for host in all_hosts %}
-{% if host in slave_hosts %}
-{{host}}={{all_racks[loop.index-1]}}
-{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}
-{% endif %}
-{% endfor %}

+ 0 - 68
ambari-server/src/main/resources/stacks/BigInsights/4.0/kerberos.json

@@ -1,68 +0,0 @@
-{
-  "properties": {
-    "realm": "${kerberos-env/realm}",
-    "keytab_dir": "/etc/security/keytabs"
-  },
-  "identities": [
-    {
-      "name": "spnego",
-      "principal": {
-        "value": "HTTP/_HOST@${realm}",
-        "type" : "service"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/spnego.service.keytab",
-        "owner": {
-          "name": "root",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        }
-      }
-    },
-    {
-      "name": "hdfs",
-      "principal": {
-        "value": "${hadoop-env/hdfs_user}@${realm}",
-        "type" : "user" ,
-        "configuration": "hadoop-env/hdfs_principal_name",
-        "local_username" : "${hadoop-env/hdfs_user}"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/hdfs.headless.keytab",
-        "owner": {
-          "name": "${hadoop-env/hdfs_user}",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        },
-        "configuration": "hadoop-env/hdfs_user_keytab"
-      }
-    },
-    {
-      "name": "smokeuser",
-      "principal": {
-        "value": "${cluster-env/smokeuser}@${realm}",
-        "type" : "user",
-        "configuration": "cluster-env/smokeuser_principal_name",
-        "local_username" : "${cluster-env/smokeuser}"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/smokeuser.headless.keytab",
-        "owner": {
-          "name": "${cluster-env/smokeuser}",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        },
-        "configuration": "cluster-env/smokeuser_keytab"
-      }
-    }
-  ]
-}

+ 0 - 22
ambari-server/src/main/resources/stacks/BigInsights/4.0/metainfo.xml

@@ -1,22 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <versions>
-	  <active>false</active>
-    </versions>
-</metainfo>

+ 0 - 265
ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json

@@ -1,265 +0,0 @@
-{
-  "stack_features": [
-    {
-      "name": "snappy",
-      "description": "Snappy compressor/decompressor support",
-      "min_version": "2.0.0.0",
-      "max_version": "2.2.0.0"
-    },
-    {
-      "name": "lzo",
-      "description": "LZO libraries support",
-      "min_version": "2.2.1.0"
-    },
-    {
-      "name": "express_upgrade",
-      "description": "Express upgrade support",
-      "min_version": "4.0.0.0"
-    },
-    {
-      "name": "rolling_upgrade",
-      "description": "Rolling upgrade support",
-      "min_version": "4.0.0.0"
-    },
-    {
-      "name": "config_versioning",
-      "description": "Configurable versions support",
-      "min_version": "4.1.0.0"
-    },
-    {
-      "name": "datanode_non_root",
-      "description": "DataNode running as non-root support (AMBARI-7615)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "remove_ranger_hdfs_plugin_env",
-      "description": "HDFS removes Ranger env files (AMBARI-14299)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger",
-      "description": "Ranger Service support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_tagsync_component",
-      "description": "Ranger Tagsync component support (AMBARI-14383)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "phoenix",
-      "description": "Phoenix Service support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "nfs",
-      "description": "NFS support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "tez_for_spark",
-      "description": "Tez dependency for Spark",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "timeline_state_store",
-      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "copy_tarball_to_hdfs",
-      "description": "Copy tarball to HDFS support (AMBARI-12113)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "spark_16plus",
-      "description": "Spark 1.6+",
-      "min_version": "2.4.0.0"
-    },
-    {
-      "name": "spark_thriftserver",
-      "description": "Spark Thrift Server",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "storm_kerberos",
-      "description": "Storm Kerberos support (AMBARI-7570)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "storm_ams",
-      "description": "Storm AMS integration (AMBARI-10710)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "create_kafka_broker_id",
-      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_listeners",
-      "description": "Kafka listeners (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_kerberos",
-      "description": "Kafka Kerberos support (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "pig_on_tez",
-      "description": "Pig on Tez support (AMBARI-7863)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_usersync_non_root",
-      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger_audit_db_support",
-      "description": "Ranger Audit to DB support",
-      "min_version": "2.2.0.0",
-      "max_version": "2.5.0.0"
-    },
-    {
-      "name": "accumulo_kerberos_user_auth",
-      "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "knox_versioned_data_dir",
-      "description": "Use versioned data dir for Knox (AMBARI-13164)",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "knox_sso_topology",
-      "description": "Knox SSO Topology support (AMBARI-13975)",
-      "min_version": "2.3.8.0"
-    },
-    {
-      "name": "atlas_rolling_upgrade",
-      "description": "Rolling upgrade support for Atlas",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "oozie_admin_user",
-      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_create_hive_tez_configs",
-      "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_setup_shared_lib",
-      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_host_kerberos",
-      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
-      "min_version": "2.0.0.0",
-      "max_version": "2.2.0.0"
-    },
-    {
-      "name": "falcon_extensions",
-      "description": "Falcon Extension",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hive_metastore_upgrade_schema",
-      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server_interactive",
-      "description": "Hive server interactive support (AMBARI-15573)",
-      "min_version": "2.5.0.0"
-     },
-    {
-      "name": "hive_webhcat_specific_configs",
-      "description": "Hive webhcat specific configurations support (AMBARI-12364)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_purge_table",
-      "description": "Hive purge table support (AMBARI-12260)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server2_kerberized_env",
-      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
-      "min_version": "2.2.3.0",
-      "max_version": "2.2.5.0"
-     },
-    {
-      "name": "hive_env_heapsize",
-      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_kms_hsm_support",
-      "description": "Ranger KMS HSM support (AMBARI-15752)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_log4j_support",
-      "description": "Ranger supporting log-4j properties (AMBARI-15681)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kerberos_support",
-      "description": "Ranger Kerberos support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hive_metastore_site_support",
-      "description": "Hive Metastore site support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_usersync_password_jceks",
-      "description": "Saving Ranger Usersync credentials in jceks",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_install_logsearch_client",
-      "description": "LogSearch Service support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hbase_home_directory",
-      "description": "Hbase home directory in HDFS needed for HBASE backup",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_livy",
-      "description": "Livy as slave component of spark",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_ranger_plugin_support",
-      "description": "Atlas Ranger plugin support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_upgrade_support",
-      "description": "Atlas supports express and rolling upgrades",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_pid_support",
-      "description": "Ranger Service support pid generation AMBARI-16756",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kms_pid_support",
-      "description": "Ranger KMS Service support pid generation",
-      "min_version": "2.5.0.0"
-    }
-  ]
-}

+ 0 - 4
ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json

@@ -1,4 +0,0 @@
-{
-  "stack_selector": ["iop-select", "/usr/bin/iop-select", "iop-select"],
-  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
-}

+ 0 - 35
ambari-server/src/main/resources/stacks/BigInsights/4.0/repos/repoinfo.xml

@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <!-- TODO define latest json file for iop 
-  <latest>http://s3.amazonaws.com/dev.hortonworks.com/HDP/hdp_urlinfo.json</latest>
-  -->
-  <mainrepoid>IOP-4.0</mainrepoid>
-  <os family="redhat6">
-    <repo>
-      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/RHEL6/x86_64/4.0</baseurl>
-      <repoid>IOP-4.0</repoid>
-      <reponame>IOP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/RHEL6/x86_64/1.0</baseurl>
-      <repoid>IOP-UTILS-1.0</repoid>
-      <reponame>IOP-UTILS</reponame>
-    </repo>
-  </os>
-</reposinfo>

+ 0 - 70
ambari-server/src/main/resources/stacks/BigInsights/4.0/role_command_order.json

@@ -1,70 +0,0 @@
-{
-  "_comment" : "Record format:",
-  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
-  "general_deps" : {
-    "_comment" : "dependencies for all cases",
-    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
-    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
-    "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "WEBHCAT_SERVER-START": ["NODEMANAGER-START", "HIVE_SERVER-START"],
-    "WEBHCAT_SERVER-RESTART": ["NODEMANAGER-RESTART", "HIVE_SERVER-RESTART"],
-    "HIVE_METASTORE-START": ["MYSQL_SERVER-START", "NAMENODE-START"],
-    "HIVE_METASTORE-RESTART": ["MYSQL_SERVER-RESTART", "NAMENODE-RESTART"],
-    "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START"],
-    "HIVE_SERVER-RESTART": ["NODEMANAGER-RESTART", "MYSQL_SERVER-RESTART"],
-    "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
-    "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"],
-    "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
-    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START"],
-    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
-    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
-    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
-    "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
-    "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
-    "SLIDER_SERVICE_CHECK-SERVICE_CHECK" : ["NODEMANAGER-START", "RESOURCEMANAGER-START"],    
-    "SPARK_SERVICE_CHECK-SERVICE_CHECK" : ["SPARK_JOBHISTORYSERVER-START", "SPARK_THRIFTSERVER-START", "APP_TIMELINE_SERVER-START"],
-    "FLUME_SERVICE_CHECK-SERVICE_CHECK": ["FLUME_HANDLER-START"]
-  },
-  "_comment" : "GLUSTERFS-specific dependencies",
-  "optional_glusterfs": {
-    "HBASE_MASTER-START": ["PEERSTATUS-START"],
-    "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"]
-  },
-  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
-  "optional_no_glusterfs": {
-    "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
-    "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
-    "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
-    "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
-    "HIVE_SERVER-START": ["DATANODE-START"],
-    "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
-        "SECONDARY_NAMENODE-START"],
-    "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
-        "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
-    "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
-    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "NAMENODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
-        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
-    "DATANODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
-        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
-    "SPARK_JOBHISTORYSERVER-START" : ["NAMENODE-START"],
-    "SPARK_THRIFTSERVER-START" : ["NAMENODE-START", "HIVE_METASTORE-START"],
-    "APP_TIMELINE_SERVER-START": ["NAMENODE-START", "DATANODE-START"]
-  },
-  "_comment" : "Dependencies that are used in HA NameNode cluster",
-  "namenode_optional_ha": {
-    "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
-    "ZKFC-START": ["ZOOKEEPER_SERVER-START"]
-  },
-  "_comment" : "Dependencies that are used in ResourceManager HA cluster",
-  "resourcemanager_optional_ha" : {
-    "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
-  }
-}

+ 0 - 27
ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/metainfo.xml

@@ -1,27 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>FLUME</name>
-      <extends>common-services/FLUME/1.4.0.2.0</extends>
-    </service>
-  </services>
-</metainfo>
-

+ 0 - 27
ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/metainfo.xml

@@ -1,27 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HBASE</name>
-      <extends>common-services/HBASE/0.96.0.2.0</extends>
-    </service>
-  </services>
-</metainfo>
-

+ 0 - 26
ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/metainfo.xml

@@ -1,26 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HDFS</name>
-      <extends>common-services/HDFS/2.1.0.2.0</extends>
-    </service>
-  </services>
-</metainfo>

+ 0 - 26
ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/metainfo.xml

@@ -1,26 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HIVE</name>
-      <extends>common-services/HIVE/0.12.0.2.0</extends>
-    </service>
-  </services>
-</metainfo>

+ 0 - 26
ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/metainfo.xml

@@ -1,26 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>KNOX</name>
-      <extends>common-services/KNOX/0.5.0.2.2</extends>
-    </service>
-  </services>
-</metainfo>

+ 0 - 26
ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/metainfo.xml

@@ -1,26 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>OOZIE</name>
-      <extends>common-services/OOZIE/4.0.0.2.0</extends>
-    </service>
-  </services>
-</metainfo>

+ 0 - 27
ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/metainfo.xml

@@ -1,27 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>PIG</name>
-      <extends>common-services/PIG/0.12.0.2.0</extends>
-    </service>
-  </services>
-</metainfo>
-

+ 0 - 27
ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/metainfo.xml

@@ -1,27 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>SLIDER</name>
-      <extends>common-services/SLIDER/0.60.0.2.2</extends>
-    </service>
-  </services>
-</metainfo>
-

+ 0 - 28
ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/metainfo.xml

@@ -1,28 +0,0 @@
-<?xml version="1.0"?>
-<!--Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
--->
-<metainfo>
-    <schemaVersion>2.0</schemaVersion>
-    <services>
-        <service>
-            <name>SPARK</name>
-            <extends>common-services/SPARK/1.2.1</extends>		
-        </service>
-    </services>
-</metainfo>
-

+ 0 - 27
ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/metainfo.xml

@@ -1,27 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>SQOOP</name>
-      <extends>common-services/SQOOP/1.4.4.2.0</extends>
-    </service>
-  </services>
-</metainfo>
-

+ 0 - 30
ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/metainfo.xml

@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>YARN</name>
-      <extends>common-services/YARN/2.1.0.2.0</extends>
-    </service>
-    <service>
-      <name>MAPREDUCE2</name>
-      <extends>common-services/MAPREDUCE2/2.1.0.2.0.6.0</extends>
-    </service>
-  </services>
-</metainfo>

+ 0 - 26
ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/metainfo.xml

@@ -1,26 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>ZOOKEEPER</name>
-      <extends>common-services/ZOOKEEPER/3.4.5</extends>
-    </service>
-  </services>
-</metainfo>

+ 0 - 24
ambari-server/src/main/resources/stacks/BigInsights/4.0/services/stack_advisor.py

@@ -1,24 +0,0 @@
-#!/usr/bin/env ambari-python-wrap
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from stack_advisor_23 import *
-
-class BigInsights40StackAdvisor(HDP23StackAdvisor):
-
-  pass

+ 0 - 1
ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_206.py

@@ -1 +0,0 @@
-../../../HDP/2.0.6/services/stack_advisor.py

+ 0 - 1
ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_21.py

@@ -1 +0,0 @@
-../../../HDP/2.1/services/stack_advisor.py

+ 0 - 1
ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_22.py

@@ -1 +0,0 @@
-../../../HDP/2.2/services/stack_advisor.py

+ 0 - 1
ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_23.py

@@ -1 +0,0 @@
-../../../HDP/2.3/services/stack_advisor.py

+ 0 - 110
ambari-server/src/main/resources/stacks/BigInsights/4.0/upgrades/config-upgrade.xml

@@ -1,110 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<upgrade-config-changes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <services>
-    <service name="HDFS">
-      <component name="NAMENODE">
-        <changes>
-          <definition xsi:type="configure" id="biginsights_4_0_namenode_update_hadoop_env" summary="Update Hadoop env">
-            <type>hadoop-env</type>
-            <replace key="content" find="export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/iop/current/hadoop-client/lib/native/Linux-amd64-64" replace-with="export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/iop/current/hadoop-client/lib/native" />
-          </definition>
-        </changes>
-      </component>
-    </service>
-    
-    <service name="MAPREDUCE2">
-      <component name="HISTORYSERVER">
-        <changes>
-          <definition xsi:type="configure" id="biginsights_4_0_mapreduce_application_framework_patch" summary="Update MapReduce2 configurations">
-            <type>mapred-site</type>
-            <set key="mapreduce.application.framework.path" value="/iop/apps/${iop.version}/mapreduce/mapreduce.tar.gz#mr-framework"/>
-          </definition>
-        </changes>
-      </component>
-    </service>
-    
-    <service name="HIVE">
-      <component name="HIVE_SERVER">
-        <changes>
-          <definition xsi:type="configure" id="biginsights_4_0_hive_server_set_transport_mode">
-            <condition type="hive-site" key="hive.server2.transport.mode" value="binary">
-              <type>hive-site</type>
-              <key>hive.server2.thrift.port</key>
-              <value>10010</value>
-            </condition>
-            <condition type="hive-site" key="hive.server2.transport.mode" value="http">
-              <type>hive-site</type>
-              <key>hive.server2.http.port</key>
-              <value>10011</value>
-            </condition>
-          </definition>
-
-          <definition xsi:type="configure" id="biginsights_4_0_hive_server_restore_transport_mode_on_downgrade">
-            <condition type="hive-site" key="hive.server2.transport.mode" value="binary">
-              <type>hive-site</type>
-              <key>hive.server2.thrift.port</key>
-              <value>10000</value>
-            </condition>
-            <condition type="hive-site" key="hive.server2.transport.mode" value="http">
-              <type>hive-site</type>
-              <key>hive.server2.http.port</key>
-              <value>10001</value>
-            </condition>
-          </definition>
-        </changes>
-      </component>
-      
-      <component name="WEBHCAT_SERVER">
-        <changes>
-          <definition xsi:type="configure" id="biginsights_4_0_webhcat_server_update_environment_configurations" summary="Update Hadoop home">
-            <type>webhcat-env</type>
-            <replace key="content" find="export HADOOP_HOME={{hadoop_home}}" replace-with="export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}" />
-          </definition>
-          
-          <definition xsi:type="configure" id="biginsights_4_0_webhcat_server_update_configurations" summary="Updating Configuration Paths">
-            <type>webhcat-site</type>
-            <replace key="templeton.jar" find="/usr/iop/current/hive-webhcat" replace-with="/usr/iop/${iop.version}/hive"/>
-            <replace key="templeton.libjars" find="/usr/iop/current/zookeeper-client" replace-with="/usr/iop/${iop.version}/zookeeper"/>
-            <replace key="templeton.hadoop" find="/usr/iop/current/hadoop-client" replace-with="/usr/iop/${iop.version}/hadoop"/>
-            <replace key="templeton.hcat" find="/usr/iop/current/hive-client" replace-with="/usr/iop/${iop.version}/hive"/>
-          </definition>
-        </changes>
-      </component>
-    </service>
-    
-    <service name="OOZIE">
-      <component name="OOZIE_SERVER">
-        <changes>
-          <definition xsi:type="configure" id="biginsights_4_0_oozie_server_update_configurations" Summary="Updating oozie-site configurations">
-            <condition type="oozie-site" key="oozie.services" value="org.apache.oozie.service.SchedulerService,      org.apache.oozie.service.InstrumentationService,      org.apache.oozie.service.CallableQueueService,      org.apache.oozie.service.UUIDService,      org.apache.oozie.service.ELService,      org.apache.oozie.service.AuthorizationService,      org.apache.oozie.service.UserGroupInformationService,      org.apache.oozie.service.HadoopAccessorService,   org.apache.oozie.service.JobsConcurrencyService,      org.apache.oozie.service.URIHandlerService,      org.apache.oozie.service.MemoryLocksService,      org.apache.oozie.service.DagXLogInfoService,      org.apache.oozie.service.SchemaService,      org.apache.oozie.service.LiteWorkflowAppService,      org.apache.oozie.service.JPAService,      org.apache.oozie.service.StoreService,      org.apache.oozie.service.SLAStoreService,      org.apache.oozie.service.DBLiteWorkflowStoreService,      org.apache.oozie.service.CallbackService,   org.apache.oozie.service.ActionService, org.apache.oozie.service.ShareLibService,      org.apache.oozie.service.ActionCheckerService,      org.apache.oozie.service.RecoveryService,      org.apache.oozie.service.PurgeService,      org.apache.oozie.service.CoordinatorEngineService,      org.apache.oozie.service.BundleEngineService,      org.apache.oozie.service.DagEngineService,      org.apache.oozie.service.CoordMaterializeTriggerService,      org.apache.oozie.service.StatusTransitService,      org.apache.oozie.service.PauseTransitService,      org.apache.oozie.service.GroupsService,      org.apache.oozie.service.ProxyUserService,    org.apache.oozie.service.XLogStreamingService,      org.apache.oozie.service.JvmPauseMonitorService">
-              <type>oozie-site</type>
-              <key>oozie.services</key>
-              <value>org.apache.oozie.service.SchedulerService,      org.apache.oozie.service.InstrumentationService,      org.apache.oozie.service.CallableQueueService,      org.apache.oozie.service.UUIDService,      org.apache.oozie.service.ELService,      org.apache.oozie.service.AuthorizationService,      org.apache.oozie.service.UserGroupInformationService,      org.apache.oozie.service.HadoopAccessorService,   org.apache.oozie.service.JobsConcurrencyService,      org.apache.oozie.service.URIHandlerService,      org.apache.oozie.service.MemoryLocksService,      org.apache.oozie.service.DagXLogInfoService,      org.apache.oozie.service.SchemaService,      org.apache.oozie.service.LiteWorkflowAppService,      org.apache.oozie.service.JPAService,      org.apache.oozie.service.StoreService,      org.apache.oozie.service.SLAStoreService,      org.apache.oozie.service.DBLiteWorkflowStoreService,      org.apache.oozie.service.CallbackService,   org.apache.oozie.service.ActionService, org.apache.oozie.service.ShareLibService,      org.apache.oozie.service.ActionCheckerService,      org.apache.oozie.service.RecoveryService,      org.apache.oozie.service.PurgeService,      org.apache.oozie.service.CoordinatorEngineService,      org.apache.oozie.service.BundleEngineService,      org.apache.oozie.service.DagEngineService,      org.apache.oozie.service.CoordMaterializeTriggerService,      org.apache.oozie.service.StatusTransitService,      org.apache.oozie.service.PauseTransitService,      org.apache.oozie.service.GroupsService,      org.apache.oozie.service.ProxyUserService,    org.apache.oozie.service.XLogStreamingService,      org.apache.oozie.service.JvmPauseMonitorService,     org.apache.oozie.service.SparkConfigurationService</value>
-            </condition>
-          </definition>
-          <definition xsi:type="configure" id="biginsights_4_0_oozie_server_update_environment_configurations" summary="Update oozie env">
-            <type>oozie-env</type>
-            <replace key="content" find="export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}" replace-with="export CATALINA_BASE={{oozie_server_dir}}" />            
-          </definition>
-        </changes>
-      </component>
-    </service>
-  </services>
-</upgrade-config-changes>

+ 0 - 717
ambari-server/src/main/resources/stacks/BigInsights/4.0/upgrades/nonrolling-upgrade-4.1.xml

@@ -1,717 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-
-<upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <target>4.1.*.*</target>
-  <target-stack>BigInsights-4.1</target-stack>
-  <type>NON_ROLLING</type>
-  
-  <prerequisite-checks>
-    <configuration>
-      <!-- Configuration properties for all pre-reqs including required pre-reqs -->
-      <check-properties name="org.apache.ambari.server.checks.HiveDynamicServiceDiscoveryCheck">
-        <property name="min-failure-stack-version">BigInsights-4.1</property>
-      </check-properties>
-    </configuration>
-  </prerequisite-checks>
-  
-  <order>
-
-    <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
-      <direction>UPGRADE</direction>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
-        <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-
-      <execute-stage service="SLIDER" component="SLIDER" title="Stop Long Running Applications on Slider">
-        <task xsi:type="manual">
-          <message>Before continuing, please stop all long-running applications deployed using Slider. E.g., su - yarn "/usr/iop/current/slider-client/bin/slider stop &lt;app_name&gt;"</message>
-        </task>
-      </execute-stage>
-    </group>
-    
-    <group xsi:type="stop" name="STOP_HIGH_LEVEL_SERVICE_COMPONENTS" title="Stop Components for High-Level Services">
-      <direction>UPGRADE</direction>
-      <skippable>true</skippable>
-      <service-check>false</service-check>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-
-      <service name="FLUME">
-        <component>FLUME_HANDLER</component>
-      </service>
-
-      <service name="KNOX">
-        <component>KNOX_GATEWAY</component>
-      </service>
-
-      <service name="KAFKA">
-        <component>KAFKA_BROKER</component>
-      </service>
-
-      <service name="OOZIE">
-        <component>OOZIE_SERVER</component>
-      </service>
-
-      <service name="SPARK">
-        <component>SPARK_JOBHISTORYSERVER</component>
-        <component>SPARK_THRIFTSERVER</component>
-      </service>
-
-      <service name="HIVE">
-        <component>WEBHCAT_SERVER</component>
-        <component>HIVE_SERVER</component>
-        <component>HIVE_METASTORE</component>
-      </service>
-
-      <service name="YARN">
-        <component>NODEMANAGER</component>
-        <component>RESOURCEMANAGER</component>
-        <component>APP_TIMELINE_SERVER</component>
-      </service>
-
-      <service name="MAPREDUCE2">
-        <component>HISTORYSERVER</component>
-      </service>
-    </group>
-    
-    <group xsi:type="cluster" name="Backups" title="Perform Backups">
-      <direction>UPGRADE</direction>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      
-      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Backup Oozie Database">
-        <task xsi:type="manual">
-          <message>Before continuing, please backup the Oozie Server database referenced by the Oozie server located on {{hosts.all}}.</message>
-        </task>
-      </execute-stage>
-      
-      <execute-stage service="HIVE" component="HIVE_METASTORE" title="Backup Hive Metastore">
-        <task xsi:type="manual">
-          <message>Before continuing, please backup the Hive Metastore database referenced by the Hive Metastore service(s) on the following host(s): {{hosts.all}}.</message>
-        </task>
-      </execute-stage>
-
-      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Backup Knox Data">
-       <task xsi:type="manual">
-          <message>Before continuing, please backup the Knox data. E.g., "cp -RL /usr/iop/current/knox-server/data/* ~/knox_backup/" on the following host(s): {{hosts.all}}.</message>
-        </task>
-      </execute-stage>
-
-      <execute-stage service="HBASE" component="HBASE_MASTER" title="Pre Upgrade HBase">
-        <task xsi:type="execute" hosts="master">
-          <script>scripts/hbase_upgrade.py</script>
-          <function>snapshot</function>
-        </task>
-      </execute-stage>
-      
-     <execute-stage service="HDFS" component="NAMENODE" title="Prepare HDFS">
-        <task xsi:type="execute" hosts="master">
-          <script>scripts/namenode.py</script>
-          <function>prepare_express_upgrade</function>
-        </task>
-     </execute-stage>
-    </group>
-
-    <group xsi:type="stop" name="STOP_LOW_LEVEL_SERVICE_COMPONENTS" title="Stop Components for Core Services">
-      <direction>UPGRADE</direction>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service-check>false</service-check>
-
-      <service name="HBASE">
-        <component>HBASE_REGIONSERVER</component>
-        <component>HBASE_MASTER</component>
-      </service>
-
-      <service name="HDFS">
-        <component>DATANODE</component>
-        <component>NAMENODE</component>
-        <component>SECONDARY_NAMENODE</component>
-        <component>ZKFC</component>
-        <component>JOURNALNODE</component>
-      </service>
-
-      <service name="ZOOKEEPER">
-        <component>ZOOKEEPER_SERVER</component>
-      </service>
-    </group>
-    
-    <group xsi:type="cluster" name="Restore Backups" title="Restore Backups">
-      <direction>DOWNGRADE</direction>
-      <skippable>true</skippable>
-
-      <!-- If the user attempts a downgrade after this point, they will need to restore backups
-      before starting any of the services. -->
-
-      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Restore Oozie Database">
-        <task xsi:type="manual">
-          <message>Before continuing, please restore the Oozie Server database referenced by the Oozie server located on {{hosts.all}}.</message>
-        </task>
-      </execute-stage>
-
-      <execute-stage service="HIVE" component="HIVE_METASTORE" title="Restore Hive Metastore">
-        <task xsi:type="manual">
-          <message>Before continuing, please restore the Hive Metastore database located on the following host(s): {{hosts.all}}.</message>
-        </task>
-      </execute-stage>
-
-      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Restore Knox Data">
-        <task xsi:type="manual">
-          <message>Before continuing, please restore the Knox data. E.g., "cp -RL ~/knox_backup/* /usr/iop/current/knox-server/data/" on the following host(s): {{hosts.all}}.</message>
-        </task>
-      </execute-stage>
-    </group>
-    
-     <!-- After processing this group, will change the effective Stack of the UpgradeContext object. -->
-    <group xsi:type="update-stack" name="UPDATE_DESIRED_STACK_ID" title="Update Target Stack">
-      <execute-stage title="Update Target Stack" service="" component="">
-        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.UpdateDesiredStackAction">
-        </task>
-      </execute-stage>
-    </group>
-    
-    <!-- Now, restart all of the services. -->
-    <group xsi:type="restart" name="ZOOKEEPER" title="ZooKeeper">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service name="ZOOKEEPER">
-        <service-check>true</service-check>
-        <component>ZOOKEEPER_SERVER</component>
-        <component>ZOOKEEPER_CLIENT</component>
-      </service>
-    </group>
-    
-    <group xsi:type="restart" name="HDFS" title="HDFS">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service name="HDFS">
-        <component>JOURNALNODE</component>
-        <component>ZKFC</component>
-        <component>NAMENODE</component>
-        <component>SECONDARY_NAMENODE</component>
-        <component>HDFS_CLIENT</component>
-      </service>
-    </group>
-
-    <group xsi:type="restart" name="HDFS_DATANODES" title="HDFS DataNodes">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <parallel-scheduler/>
-      <service name="HDFS">
-        <component>DATANODE</component>
-      </service>
-    </group>
-
-    <group xsi:type="cluster" name="HDFS_LEAVE_SAFEMODE" title="HDFS - Wait to leave Safemode">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-
-      <execute-stage service="HDFS" component="NAMENODE" title="Wait to leave Safemode">
-        <task xsi:type="execute" hosts="all" summary="Wait for NameNode to leave Safemode">
-          <script>scripts/namenode.py</script>
-          <function>wait_for_safemode_off</function>
-        </task>
-      </execute-stage>
-    </group>
-
-    <group xsi:type="restart" name="YARN_AND_MAPR" title="YARN and MapReduce2">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-
-      <service name="MAPREDUCE2">
-        <component>HISTORYSERVER</component>
-        <component>MAPREDUCE2_CLIENT</component>
-      </service>
-
-      <service name="YARN">
-        <component>APP_TIMELINE_SERVER</component>
-        <component>RESOURCEMANAGER</component>
-        <component>YARN_CLIENT</component>
-      </service>
-    </group>
-
-    <group xsi:type="restart" name="YARN_NODEMANAGERS" title="YARN NodeManagers">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <parallel-scheduler/>
-
-      <service name="YARN">
-        <component>NODEMANAGER</component>
-      </service>
-    </group>
-
-    <group xsi:type="restart" name="HBASE" title="HBASE">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service name="HBASE">
-        <component>HBASE_MASTER</component>
-        <component>HBASE_REGIONSERVER</component>
-        <component>HBASE_CLIENT</component>
-      </service>
-    </group>
-
-    <group xsi:type="restart" name="CLIENTS" title="Pig, Sqoop Clients">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <parallel-scheduler/>
-      <service name="PIG">
-        <component>PIG</component>
-      </service>
-
-      <service name="SQOOP">
-        <component>SQOOP</component>
-      </service>
-    </group>
-
-    <group name="SERVICE_CHECK" title="All Service Checks" xsi:type="service-check">
-      <skippable>true</skippable>
-      <direction>UPGRADE</direction>
-      <priority>
-        <service>HBASE</service>
-        <service>MAPREDUCE2</service>
-        <service>YARN</service>
-        <service>HDFS</service>
-      </priority>
-    </group>
-
-    <group xsi:type="restart" name="HIVE_MASTERS" title="Hive Masters">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <!-- Must be run sequentially because Hive Metastore upgrades the schema and Hive Server copies tarballs. -->
-      <parallel-scheduler>
-        <max-degree-of-parallelism>1</max-degree-of-parallelism>
-      </parallel-scheduler>
-      <service name="HIVE">
-        <component>HIVE_METASTORE</component>
-        <component>HIVE_SERVER</component>
-        <component>WEBHCAT_SERVER</component>
-      </service>
-    </group>
-
-    <group xsi:type="restart" name="HIVE_CLIENTS" title="Hive Clients">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service name="HIVE">
-         <component>HIVE_CLIENT</component>
-         <component>HCAT</component>
-      </service>
-    </group>
-
-    <group xsi:type="restart" name="SPARK" title="Spark">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service name="SPARK">
-        <component>SPARK_JOBHISTORYSERVER</component>
-        <component>SPARK_THRIFTSERVER</component>
-      </service>
-    </group>
-
-    <group xsi:type="restart" name="SPARK_CLIENTS" title="Spark Clients">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <parallel-scheduler/>
-      <service name="SPARK">
-        <component>SPARK_CLIENT</component>
-      </service>
-    </group>
-
-    <!-- Upgrade Oozie DB only on Upgrade direction, and always create a new ShareLib. -->
-    <group name="UPGRADE_OOZIE" title="Upgrade Oozie Database">
-      <direction>UPGRADE</direction>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Upgrade Oozie Database">
-        <task xsi:type="execute" hosts="any" summary="Upgrading the database and creating a new sharelib">
-          <script>scripts/oozie_server_upgrade.py</script>
-          <function>upgrade_oozie_database_and_sharelib</function>
-        </task>
-      </execute-stage>
-    </group>
-
-    <!-- Only create the ShareLib folder during a Downgrade. -->
-    <group name="DOWNGRADE_OOZIE" title="Downgrade Oozie ShareLib">
-      <direction>DOWNGRADE</direction>
-      <skippable>true</skippable>
-      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Downgrade Oozie ShareLib">
-        <task xsi:type="execute" hosts="any" summary="Upgrading the database and creating a new sharelib">
-          <script>scripts/oozie_server_upgrade.py</script>
-          <function>create_sharelib</function>
-        </task>
-      </execute-stage>
-    </group>
-
-    <group xsi:type="restart" name="OOZIE" title="Oozie">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service name="OOZIE">
-        <component>OOZIE_SERVER</component>
-      </service>
-    </group>
-
-    <group xsi:type="restart" name="OOZIE_CLIENTS" title="Oozie Clients">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <parallel-scheduler/>
-      <service name="OOZIE">
-        <component>OOZIE_CLIENT</component>
-      </service>
-    </group>
-
-    <group xsi:type="restart" name="KAFKA" title="Kafka">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <parallel-scheduler/>
-      <service name="KAFKA">
-        <component>KAFKA_BROKER</component>
-      </service>
-    </group>
-
-    <group xsi:type="restart" name="KNOX" title="Knox">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <parallel-scheduler/>
-      <service name="KNOX">
-        <component>KNOX_GATEWAY</component>
-      </service>
-    </group>
-
-    <group xsi:type="restart" name="SLIDER" title="Slider">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <parallel-scheduler/>
-      <service name="SLIDER">
-        <component>SLIDER</component>
-      </service>
-    </group>
-
-    <group xsi:type="restart" name="FLUME" title="Flume">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <parallel-scheduler/>
-      <service name="FLUME">
-        <component>FLUME_HANDLER</component>
-      </service>
-    </group>
-
-    <!--
-    Invoke "iop-select set all" to change any components we may have missed
-    that are installed on the hosts but not known by Ambari.
-    -->
-    <group xsi:type="cluster" name="ALL_HOST_OPS" title="Set Version On All Hosts">
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-
-      <execute-stage title="Update stack to {{version}}">
-        <task xsi:type="execute">
-          <script>scripts/ru_set_all.py</script>
-          <function>actionexecute</function>
-        </task>
-      </execute-stage>
-    </group>
-
-    <group xsi:type="cluster" name="FINALIZE_PRE_CHECK" title="Finalize {{direction.text.proper}} Pre-Check">
-      <direction>UPGRADE</direction>
-
-      <execute-stage title="Check Component Versions">
-        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.ComponentVersionCheckAction" />
-      </execute-stage>
-    </group>
-
-    <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-
-      <execute-stage title="Confirm Finalize">
-        <direction>UPGRADE</direction>
-        <task xsi:type="manual">
-          <message>Please confirm you are ready to finalize.</message>
-        </task>
-      </execute-stage>
-
-      <execute-stage service="HDFS" component="NAMENODE" title="Execute HDFS Finalize">
-        <task xsi:type="execute" hosts="master">
-          <script>scripts/namenode.py</script>
-          <function>finalize_non_rolling_upgrade</function>
-        </task>
-      </execute-stage>
-
-      <execute-stage title="Save Cluster State" service="" component="">
-        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction">
-        </task>
-      </execute-stage>
-    </group>
-  </order>
-
-  <processing>
-    <service name="ZOOKEEPER">
-      <component name="ZOOKEEPER_SERVER">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-
-      <component name="ZOOKEEPER_CLIENT">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="HDFS">
-      <component name="NAMENODE">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-
-      <component name="DATANODE">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-
-      <component name="HDFS_CLIENT">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-
-      <component name="JOURNALNODE">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-
-      <component name="ZKFC">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="MAPREDUCE2">
-      <component name="HISTORYSERVER">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-
-      <component name="MAPREDUCE2_CLIENT">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="YARN">
-      <component name="APP_TIMELINE_SERVER">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-
-      <component name="RESOURCEMANAGER">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-
-      <component name="NODEMANAGER">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-
-      <component name="YARN_CLIENT">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="HBASE">
-      <component name="HBASE_MASTER">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-
-      <component name="HBASE_REGIONSERVER">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-
-      <component name="HBASE_CLIENT">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="PIG">
-      <component name="PIG">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="SQOOP">
-      <component name="SQOOP">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="HIVE">
-      <component name="HIVE_METASTORE">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-
-      <component name="HIVE_SERVER">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-
-      <component name="WEBHCAT_SERVER">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-
-      <component name="HIVE_CLIENT">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-
-      <component name="HCAT">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="SPARK">
-      <component name="SPARK_JOBHISTORYSERVER">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-      <component name="SPARK_THRIFTSERVER">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-      <component name="SPARK_CLIENT">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="OOZIE">
-      <component name="OOZIE_SERVER">
-        <pre-upgrade>
-          <task xsi:type="execute" hosts="any" summary="Upgrading the database and creating a new sharelib">
-            <script>scripts/oozie_server_upgrade.py</script>
-            <function>upgrade_oozie_database_and_sharelib</function>
-          </task>
-        </pre-upgrade>
-
-        <pre-downgrade>
-          <task xsi:type="execute" hosts="any" summary="Create a new sharelib">
-            <script>scripts/oozie_server_upgrade.py</script>
-            <function>create_sharelib</function>
-          </task>
-        </pre-downgrade>
-
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-
-      <component name="OOZIE_CLIENT">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="KAFKA">
-      <component name="KAFKA_BROKER">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="KNOX">
-      <component name="KNOX_GATEWAY">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="SLIDER">
-      <component name="SLIDER">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="FLUME">
-      <component name="FLUME_HANDLER">
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-  </processing>
-</upgrade>

+ 0 - 569
ambari-server/src/main/resources/stacks/BigInsights/4.0/upgrades/upgrade-4.1.xml

@@ -1,569 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-
-<upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <target>4.1.*.*</target>
-  <target-stack>BigInsights-4.1</target-stack>
-  <type>ROLLING</type>
-  <skip-failures>false</skip-failures>
-  <skip-service-check-failures>false</skip-service-check-failures>
-  <prerequisite-checks>
-    <!-- List of additional pre-req checks to run in addition to the required pre-reqs -->
-    <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
-    <check>org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck</check>
-    <check>org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck</check>
-    <check>org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck</check>
-    <check>org.apache.ambari.server.checks.ServicesNamenodeTruncateCheck</check>
-    <check>org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck</check>
-    <check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
-    <check>org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck</check>
-    <check>org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck</check>
-
-    <configuration>
-      <!-- Configuration properties for all pre-reqs including required pre-reqs -->
-      <check-properties name="org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck">
-        <property name="min-applicable-stack-version">BigInsights-4.0</property>
-      </check-properties>
-      
-      <check-properties name="org.apache.ambari.server.checks.HiveDynamicServiceDiscoveryCheck">
-        <property name="min-failure-stack-version">BigInsights-4.1</property>
-      </check-properties>
-    </configuration>
-  </prerequisite-checks>
-  
-  <order>
-    <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
-      <direction>UPGRADE</direction>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <execute-stage service="HDFS" component="NAMENODE" title="Pre Upgrade HDFS">
-        <task xsi:type="execute" hosts="master">
-          <script>scripts/namenode.py</script>
-          <function>prepare_rolling_upgrade</function>
-        </task>
-      </execute-stage>
-    </group>
-    
-    <group xsi:type="cluster" name="PREPARE_BACKUPS" title="Prepare Backups">
-      <direction>UPGRADE</direction>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <execute-stage service="HBASE" component="HBASE_MASTER" title="Pre Upgrade HBase Backup">
-        <task xsi:type="execute" hosts="master">
-          <script>scripts/hbase_upgrade.py</script>
-          <function>snapshot</function>
-        </task>
-      </execute-stage>
-
-      <execute-stage service="HIVE" component="HIVE_METASTORE" title="Pre Upgrade Hive Backup">
-        <task xsi:type="manual">
-          <message>Before continuing, please backup the Hive Metastore database referenced by the Hive Metastore service(s) on the following host(s): {{hosts.all}}.</message>
-        </task>
-      </execute-stage>
-
-      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Pre Upgrade Oozie Backup">
-        <task xsi:type="manual">
-          <message>Before continuing, please backup the Oozie Server database referenced by the Oozie server located on {{hosts.all}}.</message>
-        </task>
-      </execute-stage>
-
-    </group>
-
-    <group name="ZOOKEEPER" title="ZooKeeper">
-      <service name="ZOOKEEPER">
-        <component>ZOOKEEPER_SERVER</component>
-      </service>
-    </group>
-    
-    <group name="CORE_MASTER" title="Core Masters">
-      <service-check>false</service-check>
-      <service name="HDFS">
-        <component>JOURNALNODE</component>
-        <component>NAMENODE</component>
-        <component>ZKFC</component>
-      </service>
-
-      <service name="MAPREDUCE2">
-        <component>HISTORYSERVER</component>
-      </service>
-
-      <service name="YARN">
-        <component>APP_TIMELINE_SERVER</component>
-        <component>RESOURCEMANAGER</component>
-      </service>
-
-      <service name="HBASE">
-        <component>HBASE_MASTER</component>
-      </service>
-      
-    </group>
-    
-    <group name="SERVICE_CHECK" title="All Service Checks" xsi:type="service-check">
-      <skippable>true</skippable>
-      <direction>UPGRADE</direction>
-      <priority>
-        <service>ZOOKEEPER</service>
-        <service>HDFS</service>
-        <service>YARN</service>
-        <service>MAPREDUCE2</service>
-        <service>HBASE</service>
-      </priority>
-      <exclude>
-        <service>AMBARI_METRICS</service>
-      </exclude>
-    </group>
-    
-    <group name="CORE_SLAVES" title="Core Slaves" xsi:type="colocated">
-      <skippable>true</skippable>
-      <service-check>false</service-check>
-      <service name="HDFS">
-        <component>DATANODE</component>
-      </service>
-      
-      <service name="HBASE">
-        <component>HBASE_REGIONSERVER</component>
-      </service>
-
-      <service name="YARN">
-        <component>NODEMANAGER</component>
-      </service>
-
-      <batch>
-        <percent>20</percent>
-        <summary>Verification Required</summary>
-        <message>The initial batch of {{components}} hosts have been {{direction.past}}. You are advised to check the hosts and perform cluster/workload-specific tests against your cluster to ensure proper operation before proceeding with {{direction.text}} of the remaining services.</message>
-      </batch>
-    </group>
-    
-    <group name="SERVICE_CHECK" title="All Service Checks" xsi:type="service-check">
-      <skippable>true</skippable>
-      <direction>UPGRADE</direction>
-      <priority>
-        <service>ZOOKEEPER</service>
-        <service>HDFS</service>
-        <service>YARN</service>
-        <service>HBASE</service>
-      </priority>
-      <exclude>
-        <service>AMBARI_METRICS</service>
-      </exclude>
-    </group>
-
-    <group name="HIVE" title="Hive">
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <service name="HIVE">
-        <component>HIVE_METASTORE</component>
-        <component>HIVE_SERVER</component>
-        <component>WEBHCAT_SERVER</component>
-      </service>
-    </group>
-
-    <group name="SPARK" title="Spark">
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <service name="SPARK">
-        <component>SPARK_JOBHISTORYSERVER</component>
-        <component>SPARK_THRIFTSERVER</component>
-        <!--<component>SPARK_CLIENT</component>-->
-      </service>
-    </group>
-
-   <group name="SPARK_CLIENTS" title="Spark Clients">
-      <skippable>true</skippable>
-      <service name="SPARK">
-        <component>SPARK_CLIENT</component>
-      </service>
-    </group>
-
-    <group name="OOZIE" title="Oozie">
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <service-check>false</service-check>
-      <service name="OOZIE">
-        <component>OOZIE_SERVER</component>
-      </service>
-    </group>
-
-    <group name="OOZIE_CLIENTS" title="Oozie Clients">
-      <skippable>true</skippable>
-      <service name="OOZIE">
-        <component>OOZIE_CLIENT</component>
-      </service>
-    </group>
-
-    <group name="CLIENTS" title="Client Components">
-      <service-check>false</service-check>
-      <service name="ZOOKEEPER">
-        <component>ZOOKEEPER_CLIENT</component>
-      </service>
-      <service name="HDFS">
-        <component>HDFS_CLIENT</component>
-      </service>
-
-      <service name="YARN">
-        <component>YARN_CLIENT</component>
-      </service>
-
-      <service name="MAPREDUCE2">
-        <component>MAPREDUCE2_CLIENT</component>
-      </service>
- 
-      <service name="HBASE">
-        <component>HBASE_CLIENT</component>
-      </service>
-
-      <service name="PIG">
-        <component>PIG</component>
-      </service>
-
-      <service name="SQOOP">
-        <component>SQOOP</component>
-      </service>
-
-      <service name="HIVE">
-        <component>HIVE_CLIENT</component>
-        <component>HCAT</component>
-      </service>
-    </group>
-
-    <group name="SERVICE_CHECK" title="All Service Checks" xsi:type="service-check">
-      <skippable>true</skippable>
-      <direction>UPGRADE</direction>
-      <priority>
-        <service>ZOOKEEPER</service>
-        <service>HDFS</service>
-        <service>YARN</service>
-        <service>HBASE</service>
-      </priority>
-      <exclude>
-        <service>AMBARI_METRICS</service>
-      </exclude>
-    </group>
-
-    <group name="KAFKA" title="Kafka">
-      <skippable>true</skippable>
-      <service name="KAFKA">
-        <component>KAFKA_BROKER</component>
-      </service>
-    </group>
-
-    <group name="KNOX" title="Knox">
-      <skippable>true</skippable>
-      <service name="KNOX">
-        <component>KNOX_GATEWAY</component>
-      </service>
-    </group>
-
-    <group name="SLIDER" title="Slider">
-      <skippable>true</skippable>
-      <service name="SLIDER">
-        <component>SLIDER</component>
-      </service>
-    </group>
-
-    <group name="FLUME" title="Flume">
-      <skippable>true</skippable>
-      <service name="FLUME">
-        <component>FLUME_HANDLER</component>
-      </service>
-    </group>
-
-   <group xsi:type="cluster" name="ALL_HOST_OPS" title="Finalize Hosts">
-      <execute-stage title="Update remaining BigInsights stack to {{version}}">
-        <task xsi:type="execute">
-          <script>scripts/ru_set_all.py</script>
-          <function>actionexecute</function>
-        </task>
-      </execute-stage>
-    </group>
-
-    <group xsi:type="cluster" name="FINALIZE_PRE_CHECK" title="Finalize {{direction.text.proper}} Pre-Check">
-      <direction>UPGRADE</direction>
-      <execute-stage title="Check Component Versions">
-        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.ComponentVersionCheckAction" />
-      </execute-stage>
-    </group>
-
-    <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      
-      <execute-stage title="Confirm Finalize">
-        <direction>UPGRADE</direction>
-        <task xsi:type="manual">
-          <message>Please confirm you are ready to finalize.</message>
-        </task>
-      </execute-stage>
-
-      <execute-stage service="HDFS" component="NAMENODE" title="Execute HDFS Finalize">
-        <task xsi:type="execute" hosts="master">
-          <script>scripts/namenode.py</script>
-          <function>finalize_rolling_upgrade</function>
-        </task>
-      </execute-stage>
-      
-      <execute-stage title="Save Cluster State" service="" component="">
-        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction">
-        </task>
-      </execute-stage>
-    </group>
-  </order>
-
-  <processing>
-    <service name="ZOOKEEPER">
-      <component name="ZOOKEEPER_SERVER">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-
-      <component name="ZOOKEEPER_CLIENT">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="HDFS">
-      <component name="NAMENODE">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-
-      <component name="DATANODE">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-
-      <component name="HDFS_CLIENT">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-        <post-upgrade>
-          <task xsi:type="manual">
-            <message>Please make sure to restart Oozie service after the finalization, in order to refresh the Hadoop related configurations.</message>
-          </task>
-        </post-upgrade>
-      </component>
-
-      <component name="JOURNALNODE">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-      
-      <component name="ZKFC">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="MAPREDUCE2">
-      <component name="HISTORYSERVER">
-        <pre-upgrade>
-          <task xsi:type="manual">
-            <summary>Verify HDFS Safe Mode is off</summary>
-            <message>
-              Before continuing, please verify that HDFS has exited safe mode by running the "hdfs dfsadmin -safemode get" command as the HDFS user on an HDFS node.
-              Make sure the output indicates that safe mode is off on the NameNode hosts listed.
-              Also make sure one of the NameNodes is the active NameNode by running  "hdfs haadmin -getServiceState [NameNode identifier]" as the HDFS user on an HDFS node.
-              The NameNode identifiers are listed in dfs.ha.namenodes.{{hdfs-site/dfs.nameservices}}.
-              By default the NameNode identifiers are "nn1" and "nn2", so the commands would be:
-              "hdfs haadmin -getServiceState nn1" and "hdfs haadmin -getServiceState nn2". 
-            </message>
-          </task>
-        </pre-upgrade>
-      
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-
-      <component name="MAPREDUCE2_CLIENT">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="YARN">
-      <component name="APP_TIMELINE_SERVER">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-
-      <component name="RESOURCEMANAGER">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-
-      <component name="NODEMANAGER">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-
-      <component name="YARN_CLIENT">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-    </service>
-    <service name="HBASE">
-      <component name="HBASE_MASTER">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-
-      <component name="HBASE_REGIONSERVER">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-
-      <component name="HBASE_CLIENT">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-    </service>
-   
-    <service name="PIG">
-      <component name="PIG">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="SQOOP">
-      <component name="SQOOP">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="HIVE">
-      <component name="HIVE_METASTORE">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-
-      <component name="HIVE_SERVER">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-
-      <component name="WEBHCAT_SERVER">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-
-      <component name="HIVE_CLIENT">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-
-      <component name="HCAT">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="SLIDER">
-      <component name="SLIDER">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="SPARK">
-      <component name="SPARK_JOBHISTORYSERVER">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-      <component name="SPARK_THRIFTSERVER">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-      <component name="SPARK_CLIENT">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="OOZIE">
-      <component name="OOZIE_SERVER">
-        <pre-upgrade>
-          <task xsi:type="execute" hosts="all" summary="Shut down all Oozie servers">
-            <script>scripts/oozie_server.py</script>
-            <function>stop</function>
-          </task>
-        </pre-upgrade>
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-      <component name="OOZIE_CLIENT">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="KAFKA">
-      <component name="KAFKA_BROKER">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-    </service>
-
-    <service name="KNOX">
-      <component name="KNOX_GATEWAY">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-    </service>
-    
-    <service name="FLUME">
-      <component name="FLUME_HANDLER">
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-    </service>
-  </processing>
-</upgrade>

+ 0 - 95
ambari-server/src/main/resources/stacks/BigInsights/4.0/widgets.json

@@ -1,95 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_system_heatmap",
-      "display_name": "Heatmaps",
-      "section_name": "SYSTEM_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Host Disk Space Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "disk_free",
-              "metric_path": "metrics/disk/disk_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "disk_total",
-              "metric_path": "metrics/disk/disk_total",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Disk Space Used %",
-              "value": "${((disk_total-disk_free)/disk_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host Memory Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_cached",
-              "metric_path": "metrics/memory/mem_cached",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host CPU Wait IO %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${cpu_wio*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}

+ 0 - 47
ambari-server/src/main/resources/stacks/BigInsights/4.1/kerberos.json

@@ -1,47 +0,0 @@
-{
-  "properties": {
-    "realm": "${kerberos-env/realm}",
-    "keytab_dir": "/etc/security/keytabs"
-  },
-  "identities": [
-    {
-      "name": "spnego",
-      "principal": {
-        "value": "HTTP/_HOST@${realm}",
-        "type" : "service"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/spnego.service.keytab",
-        "owner": {
-          "name": "root",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        }
-      }
-    },
-    {
-      "name": "smokeuser",
-      "principal": {
-        "value": "${cluster-env/smokeuser}-${cluster_name}@${realm}",
-        "type" : "user",
-        "configuration": "cluster-env/smokeuser_principal_name",
-        "local_username" : "${cluster-env/smokeuser}"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/smokeuser.headless.keytab",
-        "owner": {
-          "name": "${cluster-env/smokeuser}",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        },
-        "configuration": "cluster-env/smokeuser_keytab"
-      }
-    }
-  ]
-}

+ 0 - 23
ambari-server/src/main/resources/stacks/BigInsights/4.1/metainfo.xml

@@ -1,23 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <versions>
-	  <active>false</active>
-    </versions>
-    <extends>4.0</extends>
-</metainfo>

+ 0 - 44
ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml

@@ -1,44 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <mainrepoid>IOP-4.1</mainrepoid>
-  <os family="redhat6">
-    <repo>
-      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/rhel/6/x86_64/4.1.x/GA/4.1.0.0/</baseurl>
-      <repoid>IOP-4.1</repoid>
-      <reponame>IOP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/6/x86_64/1.1/</baseurl>
-      <repoid>IOP-UTILS-1.1</repoid>
-      <reponame>IOP-UTILS</reponame>
-    </repo>
-  </os>
-  <os family="redhat7">
-    <repo>
-      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/rhel/7/x86_64/4.1.x/GA/4.1.0.0/</baseurl>
-      <repoid>IOP-4.1</repoid>
-      <reponame>IOP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/7/x86_64/1.1/</baseurl>
-      <repoid>IOP-UTILS-1.1</repoid>
-      <reponame>IOP-UTILS</reponame>
-    </repo>
-  </os>
-</reposinfo>

+ 0 - 32
ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_RH6

@@ -1,32 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <mainrepoid>IOP-4.1</mainrepoid>
-  <os family="redhat6">
-    <repo>
-      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/rhel/6/x86_64/4.1.x/GA/4.1.0.0/</baseurl>
-      <repoid>IOP-4.1</repoid>
-      <reponame>IOP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/6/x86_64/1.1/</baseurl>
-      <repoid>IOP-UTILS-1.1</repoid>
-      <reponame>IOP-UTILS</reponame>
-    </repo>
-  </os>
-</reposinfo>

+ 0 - 32
ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_RH7

@@ -1,32 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <mainrepoid>IOP-4.1</mainrepoid>
-  <os family="redhat7">
-    <repo>
-      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/rhel/7/x86_64/4.1.x/GA/4.1.0.0/</baseurl>
-      <repoid>IOP-4.1</repoid>
-      <reponame>IOP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/7/x86_64/1.1/</baseurl>
-      <repoid>IOP-UTILS-1.1</repoid>
-      <reponame>IOP-UTILS</reponame>
-    </repo>
-  </os>
-</reposinfo>

+ 0 - 32
ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_SLES

@@ -1,32 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <mainrepoid>IOP-4.1</mainrepoid>
-  <os family="suse11">
-    <repo>
-      <baseurl>https://ibm-open-platform.ibm.com/repos/IOP/sles/11/x86_64/4.1.x/GA/4.1.0.0/</baseurl>
-      <repoid>IOP-4.1</repoid>
-      <reponame>IOP</reponame>
-    </repo>
-    <repo>
-      <baseurl>https://ibm-open-platform.ibm.com/repos/IOP-UTILS/sles/11/x86_64/1.1/</baseurl>
-      <repoid>IOP-UTILS-1.1</repoid>
-      <reponame>IOP-UTILS</reponame>
-    </repo>
-  </os>
-</reposinfo>

+ 0 - 32
ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.ppc64le_RH7

@@ -1,32 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <mainrepoid>IOP-4.1</mainrepoid>
-  <os family="redhat7">
-    <repo>
-      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/rhel/7/ppc64le/4.1.x/GA/4.1.0.0/</baseurl>
-      <repoid>IOP-4.1</repoid>
-      <reponame>IOP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/7/ppc64le/1.1/</baseurl>
-      <repoid>IOP-UTILS-1.1</repoid>
-      <reponame>IOP-UTILS</reponame>
-    </repo>
-  </os>
-</reposinfo>

+ 0 - 32
ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.s390x_RH7

@@ -1,32 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <mainrepoid>IOP-4.1</mainrepoid>
-  <os family="redhat7">
-    <repo>
-      <baseurl>https://ibm-open-platform.ibm.com/repos/IOP/rhel/7/s390x/4.1.x/GA/4.1.0.0/</baseurl>
-      <repoid>IOP-4.1</repoid>
-      <reponame>IOP</reponame>
-    </repo>
-    <repo>
-      <baseurl>https://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/7/s390x/1.1/</baseurl>
-      <repoid>IOP-UTILS-1.1</repoid>
-      <reponame>IOP-UTILS</reponame>
-    </repo>
-  </os>
-</reposinfo>

+ 0 - 22
ambari-server/src/main/resources/stacks/BigInsights/4.1/role_command_order.json

@@ -1,22 +0,0 @@
-{
-  "_comment" : "Record format:",
-  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
-  "general_deps" : {
-    "_comment" : "dependencies for all cases",
-    "SOLR-START": ["ZOOKEEPER_SERVER-START", "HDFS_SERVICE_CHECK-SERVICE_CHECK"],
-    "SOLR_SERVICE_CHECK-SERVICE_CHECK": ["SOLR-START"],
-    "KAFKA_BROKER-START" : ["ZOOKEEPER_SERVER-START"],
-    "KAFKA_SERVICE_CHECK-SERVICE_CHECK": ["KAFKA_BROKER-START"],
-    "BIGSQL_HEAD-INSTALL" : ["HIVE_CLIENT-INSTALL", "HDFS_CLIENT-INSTALL", "HBASE_CLIENT-INSTALL", "SQOOP-INSTALL"],
-    "BIGSQL_WORKER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HDFS_CLIENT-INSTALL", "HBASE_CLIENT-INSTALL", "SQOOP-INSTALL", "SLIDER-INSTALL"],
-    "BIGSQL_HEAD-START": ["HBASE_REGIONSERVER-START", "OOZIE_SERVER-START", "WEBHCAT_SERVER-START", "HIVE_METASTORE-START", "HIVE_SERVER-START"],
-    "BIGSQL_SECONDARY-START": ["BIGSQL_HEAD-START"],
-    "BIGSQL_WORKER-START": ["BIGSQL_HEAD-START", "BIGSQL_SECONDARY-START"],
-    "BIGSQL_SERVICE_CHECK-SERVICE_CHECK": ["BIGSQL_WORKER-START"]
-  },
-  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
-  "optional_no_glusterfs": {
-    "METRICS_COLLECTOR-START": ["NAMENODE-START", "DATANODE-START"],
-    "AMBARI_METRICS_SERVICE_CHECK-SERVICE_CHECK": ["METRICS_COLLECTOR-START", "HDFS_SERVICE_CHECK-SERVICE_CHECK"]
-  }
-}

+ 0 - 28
ambari-server/src/main/resources/stacks/BigInsights/4.1/services/AMBARI_METRICS/metainfo.xml

@@ -1,28 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>AMBARI_METRICS</name>
-      <version>0.1.0</version>
-      <extends>common-services/AMBARI_METRICS/0.1.0</extends>
-    </service>
-  </services>
-</metainfo>
-

+ 0 - 70
ambari-server/src/main/resources/stacks/BigInsights/4.1/services/FLUME/configuration/flume-env.xml

@@ -1,70 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>	
-    <name>flume_run_dir</name>	
-    <value>/var/run/flume</value>	
-    <description>Location to save information about running agents</description>	
-  </property>
-
-  <!-- flume-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for flume-env.sh file</description>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced
-# during Flume startup.
-
-# Environment variables can be set here.
-
-export JAVA_HOME={{java_home}}
-
-# Give Flume more memory and pre-allocate, enable remote monitoring via JMX
-# export JAVA_OPTS="-Xms100m -Xmx2000m -Dcom.sun.management.jmxremote"
-
-# Note that the Flume conf directory is always included in the classpath.
-# Add flume sink to classpath
-if [ -e "/usr/lib/flume/lib/ambari-metrics-flume-sink.jar" ]; then
-  export FLUME_CLASSPATH=$FLUME_CLASSPATH:/usr/lib/flume/lib/ambari-metrics-flume-sink.jar
-fi
-
-export HIVE_HOME={{flume_hive_home}}
-export HCAT_HOME={{flume_hcat_home}}
-    </value>
-  </property>
-</configuration>

+ 0 - 36
ambari-server/src/main/resources/stacks/BigInsights/4.1/services/FLUME/metainfo.xml

@@ -1,36 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>FLUME</name>
-      <version>1.5.2</version>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>flume_4_1_*</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-  </services>
-</metainfo>

+ 0 - 45
ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HBASE/metainfo.xml

@@ -1,45 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HBASE</name>
-      <version>1.1.1</version>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hbase_4_1_*</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      
-      <themes>
-        <theme>
-           <fileName>theme.json</fileName>
-           <default>true</default>
-        </theme>
-      </themes>
-      
-    </service>
-  </services>
-</metainfo>

Not all files can be shown because too many files were changed in this diff