
AMBARI-15265. Install & Manage Zeppelin with Ambari (Renjith Kamath via smohanty)

Sumit Mohanty 9 years ago
commit 76627aa8e4
18 changed files with 1373 additions and 0 deletions
  1. +18  -0   ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/alerts.json
  2. +179 -0   ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-config.xml
  3. +163 -0   ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
  4. +17  -0   ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/kerberos.json
  5. +64  -0   ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/metainfo.xml
  6. +26  -0   ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/alert_check_zeppelin.py
  7. +216 -0   ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
  8. +155 -0   ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/params.py
  9. +92  -0   ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/setup_snapshot.sh
  10. +29  -0  ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/status_params.py
  11. +8   -0  ambari-server/src/main/resources/stacks/HDP/2.5/role_command_order.json
  12. +46  -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/ZEPPELIN/metainfo.xml
  13. +1   -0  contrib/views/pom.xml
  14. +160 -0  contrib/views/zeppelin/pom.xml
  15. +57  -0  contrib/views/zeppelin/src/main/java/org/apache/ambari/view/zeppelin/ZeppelinServlet.java
  16. +56  -0  contrib/views/zeppelin/src/main/resources/WEB-INF/index.jsp
  17. +38  -0  contrib/views/zeppelin/src/main/resources/WEB-INF/web.xml
  18. +48  -0  contrib/views/zeppelin/src/main/resources/view.xml

+ 18 - 0
ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/alerts.json

@@ -0,0 +1,18 @@
+{
+  "ZEPPELIN": {
+    "service": [],
+    "ZEPPELIN_MASTER": [
+      {
+        "name": "zeppelin_server_status",
+        "label": "Zeppelin Server Status",
+        "description": "This host-level alert is triggered if the Zeppelin server cannot be determined to be up and responding to client requests.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "SCRIPT",
+          "path": "ZEPPELIN/0.6.0.2.5/package/scripts/alert_check_zeppelin.py"
+        }
+      }
+    ]
+  }
+}
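
The alert above uses a SCRIPT source: the Ambari agent periodically loads the module named in "path" and calls its execute() function (defined later in this commit in alert_check_zeppelin.py), mapping the returned tuple of result code and message list onto the alert state and text. The sketch below is a hypothetical driver, not the agent's actual code, written only to show the shape of that contract; it assumes the module is importable as alert_check_zeppelin.

# Hypothetical driver showing the SCRIPT-alert contract; not Ambari agent code.
import importlib

def run_script_alert(module_name, configurations=None, parameters=None, host_name=None):
    # Load the module referenced by the alert definition's "path" ...
    alert_module = importlib.import_module(module_name)  # e.g. "alert_check_zeppelin"
    # ... and call its execute() entry point, as the agent scheduler would.
    result_code, messages = alert_module.execute(configurations or {}, parameters or {}, host_name)
    # result_code is 'OK', 'CRITICAL' or 'UNKNOWN'; messages carries the alert text.
    return result_code, messages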

+ 179 - 0
ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-config.xml

@@ -0,0 +1,179 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+
+
+    <!-- contents of actual zeppelin-site.xml -->
+
+    <property>
+        <name>zeppelin.server.addr</name>
+        <value>0.0.0.0</value>
+        <description>Server address</description>
+    </property>
+
+    <property>
+        <name>zeppelin.server.port</name>
+        <value>9995</value>
+        <description>Server port. The subsequent port (e.g. 9996) should also be open as it will be
+            used by the web socket
+        </description>
+    </property>
+
+    <property>
+        <name>zeppelin.notebook.dir</name>
+        <value>notebook</value>
+        <description>notebook persist</description>
+    </property>
+
+    <property>
+        <name>zeppelin.notebook.homescreen</name>
+        <value> </value>
+        <description>id of notebook to be displayed in homescreen. e.g.) 2A94M5J1Z Empty value
+            displays default home screen
+        </description>
+    </property>
+
+    <property>
+        <name>zeppelin.notebook.homescreen.hide</name>
+        <value>false</value>
+        <description>hide homescreen notebook from list when this value set to true</description>
+    </property>
+
+
+    <property>
+        <name>zeppelin.notebook.s3.user</name>
+        <value>user</value>
+        <description>user name for s3 folder structure. If S3 is used to store the notebooks, it is
+            necessary to use the following folder structure bucketname/username/notebook/
+        </description>
+    </property>
+
+    <property>
+        <name>zeppelin.notebook.s3.bucket</name>
+        <value>zeppelin</value>
+        <description>bucket name for notebook storage. If S3 is used to store the notebooks, it is
+            necessary to use the following folder structure bucketname/username/notebook/
+        </description>
+    </property>
+
+    <property>
+        <name>zeppelin.notebook.storage</name>
+        <value>org.apache.zeppelin.notebook.repo.VFSNotebookRepo</value>
+        <description>notebook persistence layer implementation. If S3 is used, set this to
+            org.apache.zeppelin.notebook.repo.S3NotebookRepo instead. If S3 is used to store the
+            notebooks, it is necessary to use the following folder structure
+            bucketname/username/notebook/
+        </description>
+    </property>
+
+    <property>
+        <name>zeppelin.interpreter.dir</name>
+        <value>interpreter</value>
+        <description>Interpreter implementation base directory</description>
+    </property>
+
+    <property>
+        <name>zeppelin.interpreters</name>
+        <value>org.apache.zeppelin.spark.SparkInterpreter,org.apache.zeppelin.spark.PySparkInterpreter,org.apache.zeppelin.spark.SparkSqlInterpreter,org.apache.zeppelin.spark.DepInterpreter,org.apache.zeppelin.markdown.Markdown,org.apache.zeppelin.angular.AngularInterpreter,org.apache.zeppelin.shell.ShellInterpreter,org.apache.zeppelin.hive.HiveInterpreter,org.apache.zeppelin.tajo.TajoInterpreter,org.apache.zeppelin.flink.FlinkInterpreter,org.apache.zeppelin.lens.LensInterpreter,org.apache.zeppelin.ignite.IgniteInterpreter,org.apache.zeppelin.ignite.IgniteSqlInterpreter,org.apache.zeppelin.cassandra.CassandraInterpreter,org.apache.zeppelin.geode.GeodeOqlInterpreter,org.apache.zeppelin.postgresql.PostgreSqlInterpreter,org.apache.zeppelin.phoenix.PhoenixInterpreter,org.apache.zeppelin.kylin.KylinInterpreter,org.apache.zeppelin.livy.LivySparkInterpreter,org.apache.zeppelin.livy.LivyPySparkInterpreter,org.apache.zeppelin.livy.LivySparkRInterpreter,org.apache.zeppelin.livy.LivySparkSQLInterpreter</value>
+        <description>Comma-separated interpreter configurations. The first interpreter becomes the
+            default
+        </description>
+    </property>
+
+    <property>
+        <name>zeppelin.interpreter.connect.timeout</name>
+        <value>30000</value>
+        <description>Interpreter process connect timeout in msec.</description>
+    </property>
+
+    <property>
+        <name>zeppelin.ssl</name>
+        <value>false</value>
+        <description>Should SSL be used by the servers?</description>
+    </property>
+
+
+    <property>
+        <name>zeppelin.ssl.client.auth</name>
+        <value>false</value>
+        <description>Should client authentication be used for SSL connections?</description>
+    </property>
+
+    <property>
+        <name>zeppelin.ssl.keystore.path</name>
+        <value>conf/keystore</value>
+        <description>Path to keystore relative to Zeppelin home</description>
+    </property>
+
+    <property>
+        <name>zeppelin.ssl.keystore.type</name>
+        <value>JKS</value>
+        <description>The format of the given keystore (e.g. JKS or PKCS12)</description>
+    </property>
+
+    <property>
+        <name>zeppelin.ssl.keystore.password</name>
+        <value>change me</value>
+        <description>Keystore password. Can be obfuscated by the Jetty Password tool</description>
+    </property>
+
+
+    <property>
+        <name>zeppelin.ssl.key.manager.password</name>
+        <value>change me</value>
+        <description>Key Manager password. Defaults to keystore password. Can be obfuscated.
+        </description>
+    </property>
+
+
+    <property>
+        <name>zeppelin.ssl.truststore.path</name>
+        <value>conf/truststore</value>
+        <description>Path to truststore relative to Zeppelin home. Defaults to the keystore path
+        </description>
+    </property>
+
+    <property>
+        <name>zeppelin.ssl.truststore.type</name>
+        <value>JKS</value>
+        <description>The format of the given truststore (e.g. JKS or PKCS12). Defaults to the same
+            type as the keystore type
+        </description>
+    </property>
+
+
+    <property>
+        <name>zeppelin.ssl.truststore.password</name>
+        <value>change me</value>
+        <description>Truststore password. Can be obfuscated by the Jetty Password tool. Defaults to
+            the keystore password
+        </description>
+    </property>
+
+    <property>
+        <name>zeppelin.server.allowed.origins</name>
+        <value>*</value>
+        <description>Allowed sources for REST and WebSocket requests (i.e.
+            http://onehost:8080,http://otherhost.com). If you leave * you are vulnerable to
+            https://issues.apache.org/jira/browse/ZEPPELIN-173
+        </description>
+    </property>
+
+
+</configuration>

+ 163 - 0
ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml

@@ -0,0 +1,163 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+
+ <property>
+    <name>zeppelin_pid_dir</name>
+    <value>/var/run/zeppelin-notebook</value>
+    <description>Dir containing process ID file</description>
+  </property>
+
+  <property>
+    <name>zeppelin_user</name>
+    <value>zeppelin</value>
+    <property-type>USER</property-type>
+    <description>User zeppelin daemon runs as</description>
+  </property>
+
+  <property>
+    <name>zeppelin_group</name>
+    <value>zeppelin</value>
+    <property-type>GROUP</property-type>
+    <description>zeppelin group</description>
+  </property>
+
+  <property>
+    <name>zeppelin_log_dir</name>
+    <value>/var/log/zeppelin</value>
+    <description>Zeppelin Log dir</description>
+  </property>
+
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for zeppelin-env.sh file</description>
+    <value>
+# Spark master url. eg. spark://master_addr:7077. Leave empty if you want to use local mode
+export MASTER=yarn-client
+export SPARK_YARN_JAR={{spark_jar}}
+
+
+# Where log files are stored.  PWD by default.
+export ZEPPELIN_LOG_DIR={{zeppelin_log_dir}}
+
+# The pid files are stored. /tmp by default.
+export ZEPPELIN_PID_DIR={{zeppelin_pid_dir}}
+
+
+export JAVA_HOME={{java64_home}}
+
+# Additional jvm options. for example, export ZEPPELIN_JAVA_OPTS="-Dspark.executor.memory=8g -Dspark.cores.max=16"
+export ZEPPELIN_JAVA_OPTS="-Dhdp.version={{hdp_version}} -Dspark.executor.memory={{executor_mem}} -Dspark.executor.instances={{executor_instances}} -Dspark.yarn.queue={{spark_queue}}"
+
+
+# Zeppelin jvm mem options Default -Xmx1024m -XX:MaxPermSize=512m
+# export ZEPPELIN_MEM
+
+# zeppelin interpreter process jvm mem options. Default = ZEPPELIN_MEM
+# export ZEPPELIN_INTP_MEM
+
+# zeppelin interpreter process jvm options. Default = ZEPPELIN_JAVA_OPTS
+# export ZEPPELIN_INTP_JAVA_OPTS
+
+# Where notebook saved
+# export ZEPPELIN_NOTEBOOK_DIR
+
+# Id of notebook to be displayed in homescreen. ex) 2A94M5J1Z
+# export ZEPPELIN_NOTEBOOK_HOMESCREEN
+
+# hide homescreen notebook from list when this value set to "true". default "false"
+# export ZEPPELIN_NOTEBOOK_HOMESCREEN_HIDE
+
+# Bucket where notebook saved
+# export ZEPPELIN_NOTEBOOK_S3_BUCKET
+
+# User in bucket where notebook saved. For example bucket/user/notebook/2A94M5J1Z/note.json
+# export ZEPPELIN_NOTEBOOK_S3_USER
+
+# A string representing this instance of zeppelin. $USER by default
+# export ZEPPELIN_IDENT_STRING
+
+# The scheduling priority for daemons. Defaults to 0.
+# export ZEPPELIN_NICENESS
+
+
+#### Spark interpreter configuration ####
+
+## Use provided spark installation ##
+## defining SPARK_HOME makes Zeppelin run spark interpreter process using spark-submit
+##
+# (required) When it is defined, load it instead of Zeppelin embedded Spark libraries
+export SPARK_HOME={{spark_home}}
+
+# (optional) extra options to pass to spark submit. eg) "--driver-memory 512M --executor-memory 1G".
+# export SPARK_SUBMIT_OPTIONS
+
+## Use embedded spark binaries ##
+## without SPARK_HOME defined, Zeppelin still able to run spark interpreter process using embedded spark binaries.
+## however, it is not encouraged when you can define SPARK_HOME
+##
+# Options read in YARN client mode
+# yarn-site.xml is located in configuration directory in HADOOP_CONF_DIR.
+export HADOOP_CONF_DIR=/etc/hadoop/conf
+
+# Pyspark (supported with Spark 1.2.1 and above)
+# To configure pyspark, you need to set spark distribution's path to 'spark.home' property in Interpreter setting screen in Zeppelin GUI
+# path to the python command. must be the same path on the driver(Zeppelin) and all workers.
+# export PYSPARK_PYTHON
+
+export PYTHONPATH="${SPARK_HOME}/python:${SPARK_HOME}/python/lib/py4j-0.8.2.1-src.zip"
+export SPARK_YARN_USER_ENV="PYTHONPATH=${PYTHONPATH}"
+
+## Spark interpreter options ##
+##
+# Use HiveContext instead of SQLContext if set true. true by default.
+# export ZEPPELIN_SPARK_USEHIVECONTEXT
+
+# Execute multiple SQL concurrently if set true. false by default.
+# export ZEPPELIN_SPARK_CONCURRENTSQL
+
+# Max number of SparkSQL result to display. 1000 by default.
+# export ZEPPELIN_SPARK_MAXRESULT
+
+  </value>
+  </property>
+
+  <property>
+    <name>zeppelin.executor.mem</name>
+    <value>512m</value>
+    <description>Executor memory to use (e.g. 512m or 1g)</description>
+  </property>
+
+  <property>
+    <name>zeppelin.executor.instances</name>
+    <value>2</value>
+    <description>Number of executor instances to use (e.g. 2)</description>
+  </property>
+
+  <property>
+    <name>zeppelin.spark.jar.dir</name>
+    <value>/apps/zeppelin</value>
+    <description>Shared location where zeppelin spark jar will be copied to. Should be accessible
+      by all cluster nodes
+    </description>
+  </property>
+
+</configuration>
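
The content property above is a Jinja-style template: placeholders such as {{zeppelin_log_dir}}, {{zeppelin_pid_dir}} and {{spark_home}} are filled in from params.py and the result is written out as conf/zeppelin-env.sh (see the InlineTemplate and File calls in master.py further down). As a rough stand-in, the snippet below uses the jinja2 library directly with made-up values just to show the substitution; Ambari itself renders the template through resource_management's InlineTemplate, not jinja2 called like this.

# Stand-in illustration of rendering the zeppelin-env.sh template; values are examples only.
from jinja2 import Template

env_template = (
    "export ZEPPELIN_LOG_DIR={{zeppelin_log_dir}}\n"
    "export ZEPPELIN_PID_DIR={{zeppelin_pid_dir}}\n"
    "export SPARK_HOME={{spark_home}}\n"
)
rendered = Template(env_template).render(
    zeppelin_log_dir="/var/log/zeppelin",           # default from zeppelin-env
    zeppelin_pid_dir="/var/run/zeppelin-notebook",  # default from zeppelin-env
    spark_home="/usr/hdp/current/spark-client/",    # value used in params.py
)
print(rendered)  # the lines that end up in conf/zeppelin-env.sh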

+ 17 - 0
ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/kerberos.json

@@ -0,0 +1,17 @@
+{
+  "services": [
+    {
+      "name": "ZEPPELIN",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "ZEPPELIN_MASTER"
+        }
+      ]
+    }
+  ]
+}

+ 64 - 0
ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/metainfo.xml

@@ -0,0 +1,64 @@
+<?xml version="1.0"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZEPPELIN</name>
+      <displayName>Zeppelin Notebook</displayName>
+      <comment>A web-based notebook that enables interactive data analytics. It enables you to
+        make beautiful data-driven, interactive and collaborative documents with SQL, Scala
+        and more.
+      </comment>
+      <version>0.6.0.2.5</version>
+      <components>
+        <component>
+          <name>ZEPPELIN_MASTER</name>
+          <displayName>Zeppelin Notebook</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/master.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>10000</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>zeppelin</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <requiredServices>
+        <service>SPARK</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>zeppelin-config</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterChange>false</restartRequiredAfterChange>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/alert_check_zeppelin.py

@@ -0,0 +1,26 @@
+import glob
+import sys
+
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.script import Script
+
+reload(sys)
+sys.setdefaultencoding('utf8')
+config = Script.get_config()
+
+zeppelin_pid_dir = config['configurations']['zeppelin-env']['zeppelin_pid_dir']
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  try:
+    pid_file = glob.glob(zeppelin_pid_dir + '/zeppelin-*.pid')[0]
+    check_process_status(pid_file)
+  except ComponentIsNotRunning as ex:
+    return (RESULT_CODE_CRITICAL, [str(ex)])
+  except:
+    return (RESULT_CODE_CRITICAL, ["Zeppelin is not running"])
+
+  return (RESULT_CODE_OK, ["Successful connection to Zeppelin"])
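
Outside an Ambari agent (where Script.get_config() and check_process_status are available), the check above amounts to: find the zeppelin-*.pid file and verify that the process it names is still alive. The snippet below is a self-contained approximation of that logic for readers who want to reproduce it by hand; it is not the resource_management implementation, which performs additional validation.

# Self-contained approximation of the pid-file liveness check; not the Ambari implementation.
import glob
import os

def zeppelin_is_running(pid_dir):
    pid_files = glob.glob(pid_dir + '/zeppelin-*.pid')
    if not pid_files:
        return False
    try:
        pid = int(open(pid_files[0]).read().strip())
        os.kill(pid, 0)  # signal 0 checks for existence without touching the process
        return True
    except (ValueError, OSError):
        return False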

+ 216 - 0
ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py

@@ -0,0 +1,216 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import glob
+import grp
+import os
+import pwd
+import sys
+from resource_management.core.resources import Directory
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import InlineTemplate
+from resource_management.libraries import XmlConfig
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.script.script import Script
+
+class Master(Script):
+  def install(self, env):
+    import params
+    env.set_params(params)
+
+    Execute('chmod +x ' + params.service_packagedir + "/scripts/setup_snapshot.sh")
+
+    # Create user and group if they don't exist
+    self.create_linux_user(params.zeppelin_user, params.zeppelin_group)
+    self.install_packages(env)
+
+    Execute('chown -R ' + params.zeppelin_user + ':' + params.zeppelin_group + ' ' + params.zeppelin_dir)
+
+    # create the log, pid, zeppelin dirs
+    Directory([params.zeppelin_pid_dir, params.zeppelin_log_dir, params.zeppelin_dir],
+              owner=params.zeppelin_user,
+              group=params.zeppelin_group,
+              cd_access="a",
+              mode=0755
+              )
+
+    Execute('echo spark_version:' + params.spark_version + ' detected for spark_home: '
+            + params.spark_home + ' >> ' + params.zeppelin_log_file, user=params.zeppelin_user)
+
+    # update the configs specified by user
+    self.configure(env)
+
+    # run setup_snapshot.sh
+    Execute(format("{service_packagedir}/scripts/setup_snapshot.sh {zeppelin_dir} "
+                   "{hive_metastore_host} {hive_metastore_port} {hive_server_port} "
+                   "{zeppelin_host} {zeppelin_port} {setup_view} {service_packagedir} "
+                   "{java64_home} >> {zeppelin_log_file}"),
+            user=params.zeppelin_user)
+
+  def create_linux_user(self, user, group):
+    try:
+      pwd.getpwnam(user)
+    except KeyError:
+      Execute('adduser ' + user)
+    try:
+      grp.getgrnam(group)
+    except KeyError:
+      Execute('groupadd ' + group)
+
+  def create_zeppelin_dir(self, params):
+    params.HdfsResource(format("/user/{zeppelin_user}"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.zeppelin_user,
+                        recursive_chown=True,
+                        recursive_chmod=True
+                        )
+    params.HdfsResource(format("/user/{zeppelin_user}/test"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.zeppelin_user,
+                        recursive_chown=True,
+                        recursive_chmod=True
+                        )
+    params.HdfsResource(format("/apps/zeppelin"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.zeppelin_user,
+                        recursive_chown=True,
+                        recursive_chmod=True
+                        )
+
+    spark_deps_full_path = glob.glob(params.zeppelin_dir + '/interpreter/spark/dep/zeppelin-spark-dependencies-*.jar')[0]
+    spark_dep_file_name = os.path.basename(spark_deps_full_path);
+
+    params.HdfsResource(params.spark_jar_dir + "/" + spark_dep_file_name,
+                        type="file",
+                        action="create_on_execute",
+                        source=spark_deps_full_path,
+                        group=params.zeppelin_group,
+                        owner=params.zeppelin_user,
+                        mode=0444,
+                        replace_existing_files=True,
+                        )
+
+    params.HdfsResource(None, action="execute")
+
+  def configure(self, env):
+    import params
+    import status_params
+    env.set_params(params)
+    env.set_params(status_params)
+
+    # write out zeppelin-site.xml
+    XmlConfig("zeppelin-site.xml",
+              conf_dir=params.conf_dir,
+              configurations=params.config['configurations']['zeppelin-config'],
+              owner=params.zeppelin_user,
+              group=params.zeppelin_group
+              )
+    # write out zeppelin-env.sh
+    env_content = InlineTemplate(params.zeppelin_env_content)
+    File(format("{params.conf_dir}/zeppelin-env.sh"), content=env_content,
+         owner=params.zeppelin_user, group=params.zeppelin_group)  # , mode=0777)
+
+  def stop(self, env):
+    import params
+    Execute(params.zeppelin_dir + '/bin/zeppelin-daemon.sh stop >> ' + params.zeppelin_log_file,
+            user=params.zeppelin_user)
+
+  def start(self, env):
+    import params
+    import status_params
+    self.configure(env)
+
+    if glob.glob(
+            params.zeppelin_dir + '/interpreter/spark/dep/zeppelin-spark-dependencies-*.jar') and os.path.exists(
+      glob.glob(params.zeppelin_dir + '/interpreter/spark/dep/zeppelin-spark-dependencies-*.jar')[0]):
+      self.create_zeppelin_dir(params)
+
+    Execute(params.zeppelin_dir + '/bin/zeppelin-daemon.sh start >> '
+            + params.zeppelin_log_file, user=params.zeppelin_user)
+    pidfile = glob.glob(status_params.zeppelin_pid_dir
+                        + '/zeppelin-' + params.zeppelin_user + '*.pid')[0]
+    Execute('echo pid file is: ' + pidfile, user=params.zeppelin_user)
+    contents = open(pidfile).read()
+    Execute('echo pid is ' + contents, user=params.zeppelin_user)
+
+    # if first_setup:
+    import time
+    time.sleep(20)
+    self.update_zeppelin_interpreter()
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    pid_file = glob.glob(status_params.zeppelin_pid_dir + '/zeppelin-'
+                         + status_params.zeppelin_user + '*.pid')[0]
+    check_process_status(pid_file)
+
+  def update_zeppelin_interpreter(self):
+    import params
+    import json, urllib2
+    zeppelin_int_url = 'http://' + params.zeppelin_host + ':' + str(
+      params.zeppelin_port) + '/api/interpreter/setting/'
+
+    # fetch current interpreter settings for spark, hive, phoenix
+    data = json.load(urllib2.urlopen(zeppelin_int_url))
+    print data
+    for body in data['body']:
+      if body['group'] == 'spark':
+        sparkbody = body
+      elif body['group'] == 'hive':
+        hivebody = body
+      elif body['group'] == 'phoenix':
+        phoenixbody = body
+
+    # if hive installed, update hive settings and post to hive interpreter
+    if (params.hive_server_host):
+      hivebody['properties']['hive.hiveserver2.url'] = 'jdbc:hive2://' \
+                                                       + params.hive_server_host \
+                                                       + ':' + params.hive_server_port
+      self.post_request(zeppelin_int_url + hivebody['id'], hivebody)
+
+    # if hbase installed, update hbase settings and post to phoenix interpreter
+    if (params.zookeeper_znode_parent and params.hbase_zookeeper_quorum):
+      phoenixbody['properties'][
+        'phoenix.jdbc.url'] = "jdbc:phoenix:" + params.hbase_zookeeper_quorum + ':' \
+                              + params.zookeeper_znode_parent
+      self.post_request(zeppelin_int_url + phoenixbody['id'], phoenixbody)
+
+  def post_request(self, url, body):
+    import json, urllib2
+    encoded_body = json.dumps(body)
+    req = urllib2.Request(str(url), encoded_body)
+    req.get_method = lambda: 'PUT'
+    try:
+      response = urllib2.urlopen(req, encoded_body).read()
+    except urllib2.HTTPError, error:
+      print 'Exception: ' + error.read()
+
+    jsonresp = json.loads(response.decode('utf-8'))
+    print jsonresp['status']
+
+
+if __name__ == "__main__":
+  Master().execute()
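
update_zeppelin_interpreter() above drives Zeppelin's REST API: it GETs /api/interpreter/setting/, picks the hive and phoenix entries out of the response body, rewrites their JDBC URLs, and PUTs each entry back to /api/interpreter/setting/<id>. The snippet below is a trimmed sketch of that round trip in the same Python 2 / urllib2 style as the scripts in this commit; the host, port and JDBC URL are illustrative values, not ones taken from a real cluster.

# Example round trip against Zeppelin's interpreter-setting API; all values are illustrative.
import json
import urllib2

base_url = 'http://zeppelin-host.example.com:9995/api/interpreter/setting/'

settings = json.load(urllib2.urlopen(base_url))  # GET current interpreter settings
hive = next(s for s in settings['body'] if s['group'] == 'hive')
hive['properties']['hive.hiveserver2.url'] = 'jdbc:hive2://hive-host.example.com:10001'

req = urllib2.Request(base_url + hive['id'], json.dumps(hive))
req.get_method = lambda: 'PUT'  # urllib2 issues POST by default when data is present
print(urllib2.urlopen(req).read())  # Zeppelin returns the updated setting as JSON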

+ 155 - 0
ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/params.py

@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import functools
+import os
+import re
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.get_stack_version import get_stack_version
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.script.script import Script
+
+
+def get_port_from_url(address):
+  if not (address is None):
+    return address.split(':')[-1]
+  else:
+    return address
+
+
+# server configurations
+config = Script.get_config()
+
+# e.g. /var/lib/ambari-agent/cache/stacks/HDP/2.2/services/zeppelin-stack/package
+service_packagedir = os.path.realpath(__file__).split('/scripts')[0]
+
+zeppelin_dirname = 'zeppelin-server/lib'
+
+install_dir = '/usr/hdp/current'
+executor_mem = config['configurations']['zeppelin-env']['zeppelin.executor.mem']
+executor_instances = config['configurations']['zeppelin-env'][
+  'zeppelin.executor.instances']
+
+spark_jar_dir = config['configurations']['zeppelin-env']['zeppelin.spark.jar.dir']
+spark_jar = format("{spark_jar_dir}/zeppelin-spark-0.5.5-SNAPSHOT.jar")
+setup_view = True
+temp_file = config['configurations']['zeppelin-env']['zeppelin.temp.file']
+spark_home = "/usr/hdp/current/spark-client/"
+
+try:
+  fline = open(spark_home + "/RELEASE").readline().rstrip()
+  spark_version = re.search('Spark (\d\.\d).+', fline).group(1)
+except:
+  pass
+
+# params from zeppelin-config
+zeppelin_port = str(config['configurations']['zeppelin-config']['zeppelin.server.port'])
+
+# params from zeppelin-env
+zeppelin_user = config['configurations']['zeppelin-env']['zeppelin_user']
+zeppelin_group = config['configurations']['zeppelin-env']['zeppelin_group']
+zeppelin_log_dir = config['configurations']['zeppelin-env']['zeppelin_log_dir']
+zeppelin_pid_dir = config['configurations']['zeppelin-env']['zeppelin_pid_dir']
+zeppelin_log_file = os.path.join(zeppelin_log_dir, 'zeppelin-setup.log')
+zeppelin_hdfs_user_dir = format("/user/{zeppelin_user}")
+
+zeppelin_dir = os.path.join(*[install_dir, zeppelin_dirname])
+conf_dir = os.path.join(*[install_dir, zeppelin_dirname, 'conf'])
+notebook_dir = os.path.join(*[install_dir, zeppelin_dirname, 'notebook'])
+
+# zeppelin-env.sh
+zeppelin_env_content = config['configurations']['zeppelin-env']['content']
+
+# detect configs
+master_configs = config['clusterHostInfo']
+java64_home = config['hostLevelParams']['java_home']
+ambari_host = str(master_configs['ambari_server_host'][0])
+zeppelin_host = str(master_configs['zeppelin_master_hosts'][0])
+
+# detect HS2 details, if installed
+
+if 'hive_server_host' in master_configs and len(master_configs['hive_server_host']) != 0:
+  hive_server_host = str(master_configs['hive_server_host'][0])
+  hive_metastore_host = str(master_configs['hive_metastore_host'][0])
+  hive_metastore_port = str(
+    get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']))
+  hive_server_port = str(config['configurations']['hive-site']['hive.server2.thrift.http.port'])
+else:
+  hive_server_host = None
+  hive_metastore_host = '0.0.0.0'
+  hive_metastore_port = None
+  hive_server_port = None
+
+# detect hbase details if installed
+if 'hbase_master_hosts' in master_configs and 'hbase-site' in config['configurations']:
+  zookeeper_znode_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
+  hbase_zookeeper_quorum = config['configurations']['hbase-site']['hbase.zookeeper.quorum']
+else:
+  zookeeper_znode_parent = None
+  hbase_zookeeper_quorum = None
+
+# detect spark queue
+if 'spark.yarn.queue' in config['configurations']['spark-defaults']:
+  spark_queue = config['configurations']['spark-defaults']['spark.yarn.queue']
+else:
+  spark_queue = 'default'
+
+# e.g. 2.3
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+
+# e.g. 2.3.0.0
+hdp_stack_version = format_stack_version(stack_version_unformatted)
+
+# e.g. 2.3.0.0-2130
+full_version = default("/commandParams/version", None)
+hdp_version = full_version
+
+spark_client_version = get_stack_version('spark-client')
+
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+# create partial functions with common arguments for every HdfsResource call
+# to create hdfs directory we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file="/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled=security_enabled,
+  keytab=hdfs_user_keytab,
+  kinit_path_local=kinit_path_local,
+  hadoop_bin_dir=hadoop_bin_dir,
+  hadoop_conf_dir=hadoop_conf_dir,
+  principal_name=hdfs_principal_name,
+  hdfs_site=hdfs_site,
+  default_fs=default_fs
+)
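
get_port_from_url() at the top of params.py is what turns hive.metastore.uris into the bare metastore port that gets passed to setup_snapshot.sh. A quick worked example of the helper follows (the function is repeated so the snippet is self-contained; the URI is a typical example value, not one from a real cluster):

# Worked example for the get_port_from_url() helper defined above.
def get_port_from_url(address):
  if not (address is None):
    return address.split(':')[-1]
  else:
    return address

print(get_port_from_url('thrift://metastore-host.example.com:9083'))  # -> '9083'
print(get_port_from_url(None))                                        # -> None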

+ 92 - 0
ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/setup_snapshot.sh

@@ -0,0 +1,92 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+set -e
+#e.g. /opt/incubator-zeppelin
+export INSTALL_DIR=$1
+
+#e.g. sandbox.hortonworks.com
+export HIVE_METASTORE_HOST=$2
+
+#e.g. 9083
+export HIVE_METASTORE_PORT=$3
+
+#e.g. 10000
+export HIVE_SERVER_PORT=$4
+
+export ZEPPELIN_HOST=$5
+
+export ZEPPELIN_PORT=$6
+
+#if true, will setup Ambari view and import notebooks
+export SETUP_VIEW=$7
+
+export PACKAGE_DIR=$8
+export java64_home=$9
+
+SETUP_VIEW=${SETUP_VIEW,,}
+echo "SETUP_VIEW is $SETUP_VIEW"
+
+SetupZeppelin () {
+
+  echo "Setting up zeppelin at $INSTALL_DIR"
+  cd $INSTALL_DIR
+
+  rm -rf notebook/*
+
+  if [ "$HIVE_METASTORE_HOST" != "0.0.0.0" ]
+  then
+    echo "Hive metastore detected: $HIVE_METASTORE_HOST. Setting up conf/hive-site.xml"
+    echo "<configuration>" > conf/hive-site.xml
+    echo "<property>" >> conf/hive-site.xml
+    echo "   <name>hive.metastore.uris</name>" >> conf/hive-site.xml
+    echo "   <value>thrift://$HIVE_METASTORE_HOST:$HIVE_METASTORE_PORT</value>" >> conf/hive-site.xml
+    echo "</property>" >> conf/hive-site.xml
+    echo "<property>" >> conf/hive-site.xml
+    echo "   <name>hive.server2.thrift.http.port</name>" >> conf/hive-site.xml
+    echo "   <value>$HIVE_SERVER_PORT</value>" >> conf/hive-site.xml
+    echo "</property>" >> conf/hive-site.xml
+    echo "</configuration>" >> conf/hive-site.xml
+  else
+    echo "HIVE_METASTORE_HOST is $HIVE_METASTORE_HOST: Skipping hive-site.xml setup as Hive does not seem to be installed"
+  fi
+
+  if [[ $SETUP_VIEW == "true" ]]
+  then
+    echo "Importing notebooks"
+    mkdir -p notebook
+    cd notebook
+    wget https://github.com/hortonworks-gallery/zeppelin-notebooks/archive/master.zip -O notebooks.zip
+    unzip notebooks.zip
+
+    if [ -d "zeppelin-notebooks-master" ]; then
+      mv zeppelin-notebooks-master/* .
+      rm -rf zeppelin-notebooks-master
+    fi
+
+    cd ..
+  else
+    echo "Skipping import of sample notebooks"
+  fi
+
+}
+
+SetupZeppelin
+echo "Setup complete"

+ 29 - 0
ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/status_params.py

@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script import Script
+
+config = Script.get_config()
+
+zeppelin_pid_dir = config['configurations']['zeppelin-env']['zeppelin_pid_dir']
+zeppelin_user = config['configurations']['zeppelin-env']['zeppelin_user']
+zeppelin_group = config['configurations']['zeppelin-env']['zeppelin_group']
+zeppelin_log_dir = config['configurations']['zeppelin-env']['zeppelin_log_dir']

+ 8 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/role_command_order.json

@@ -0,0 +1,8 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "ZEPPELIN_MASTER-START" : ["NAMENODE-START"]
+  }
+}

+ 46 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/ZEPPELIN/metainfo.xml

@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>ZEPPELIN</name>
+            <version>0.6.0.2.5</version>
+            <extends>common-services/ZEPPELIN/0.6.0.2.5</extends>
+            <osSpecifics>
+                <osSpecific>
+                    <osFamily>redhat7,amazon2015,redhat6,suse11</osFamily>
+                    <packages>
+                        <package>
+                            <name>zeppelin_${stack_version}</name>
+                        </package>
+                    </packages>
+                </osSpecific>
+                <osSpecific>
+                    <osFamily>debian7,ubuntu12,ubuntu14</osFamily>
+                    <packages>
+                        <package>
+                            <name>zeppelin-${stack_version}</name>
+                        </package>
+                    </packages>
+                </osSpecific>
+            </osSpecifics>
+        </service>
+    </services>
+</metainfo>

+ 1 - 0
contrib/views/pom.xml

@@ -42,6 +42,7 @@
     <module>capacity-scheduler</module>
     <module>tez</module>
     <module>storm</module>
+    <module>zeppelin</module>
   </modules>
   <build>
     <pluginManagement>

+ 160 - 0
contrib/views/zeppelin/pom.xml

@@ -0,0 +1,160 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.ambari.contrib.views</groupId>
+  <artifactId>zeppelin-view</artifactId>
+  <version>1.0.0.0</version>
+  <name>Zeppelin</name>
+
+  <parent>
+    <groupId>org.apache.ambari.contrib.views</groupId>
+    <artifactId>ambari-contrib-views</artifactId>
+    <version>2.0.0.0-SNAPSHOT</version>
+  </parent>
+
+  <dependencies>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>4.8.1</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.easymock</groupId>
+      <artifactId>easymock</artifactId>
+      <version>3.1</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.ambari</groupId>
+      <artifactId>ambari-views</artifactId>
+      <version>[1.7.0.0,)</version>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-server</artifactId>
+      <version>1.8</version>
+    </dependency>
+    <dependency>
+      <groupId>javax.servlet</groupId>
+      <artifactId>servlet-api</artifactId>
+      <version>2.5</version>
+      <scope>provided</scope>
+    </dependency>
+
+
+    <dependency>
+      <groupId>commons-configuration</groupId>
+      <artifactId>commons-configuration</artifactId>
+      <version>1.6</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-csv</artifactId>
+      <version>1.0</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-collections4</artifactId>
+      <version>4.0</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <version>1.7.5</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.ambari.contrib.views</groupId>
+      <artifactId>ambari-views-utils</artifactId>
+      <version>2.0.0.0-SNAPSHOT</version>
+    </dependency>
+
+  </dependencies>
+
+  <properties>
+    <ambari.dir>${project.parent.parent.parent.basedir}</ambari.dir>
+    <hive-version>1.0.0</hive-version>
+    <ambari.version>1.3.0.0-SNAPSHOT</ambari.version>
+  </properties>
+  <build>
+    <plugins>
+
+      <!-- Building frontend -->
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.1</version>
+        <configuration>
+          <source>1.7</source>
+          <target>1.7</target>
+        </configuration>
+      </plugin>
+      <plugin>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <phase>generate-resources</phase>
+            <goals>
+              <goal>copy-dependencies</goal>
+            </goals>
+            <configuration>
+              <outputDirectory>${project.build.directory}/lib</outputDirectory>
+              <includeScope>runtime</includeScope>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.vafer</groupId>
+        <artifactId>jdeb</artifactId>
+        <version>1.0.1</version>
+        <executions>
+          <execution>
+            <phase>none</phase>
+            <goals>
+              <goal>jdeb</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <submodules>false</submodules>
+        </configuration>
+      </plugin>
+    </plugins>
+    <resources>
+      <resource>
+        <directory>src/main/resources</directory>
+        <filtering>true</filtering>
+        <includes>
+          <include>WEB-INF/web.xml</include>
+          <include>META-INF/**/*</include>
+          <include>view.xml</include>
+          <include>WEB-INF/index.jsp</include>
+        </includes>
+      </resource>
+      <resource>
+        <targetPath>WEB-INF/lib</targetPath>
+        <filtering>false</filtering>
+        <directory>target/lib</directory>
+      </resource>
+    </resources>
+  </build>
+</project>

+ 57 - 0
contrib/views/zeppelin/src/main/java/org/apache/ambari/view/zeppelin/ZeppelinServlet.java

@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.zeppelin;
+
+import org.apache.ambari.view.ViewContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.ServletConfig;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+
+
+public class ZeppelinServlet extends HttpServlet {
+    private ViewContext viewContext;
+    private final static Logger LOG = LoggerFactory.getLogger(ZeppelinServlet.class);
+
+    @Override
+    public void init(ServletConfig config) throws ServletException {
+        super.init(config);
+
+        ServletContext context = config.getServletContext();
+        viewContext = (ViewContext) context.getAttribute(ViewContext.CONTEXT_ATTRIBUTE);
+    }
+
+    @Override
+    protected void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException {
+        response.setContentType("text/html");
+        response.setStatus(HttpServletResponse.SC_OK);
+
+        String port = viewContext.getProperties().get("zeppelin.server.port");
+        String publicName = viewContext.getProperties().get("zeppelin.host.publicname");
+        request.setAttribute("port", port);
+
+        request.getRequestDispatcher("WEB-INF/index.jsp").forward(request, response);
+    }
+}

+ 56 - 0
contrib/views/zeppelin/src/main/resources/WEB-INF/index.jsp

@@ -0,0 +1,56 @@
+<!DOCTYPE html>
+<%--
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+--%>
+<html lang="en">
+<head>
+    <meta charset="utf-8"/>
+</head>
+<body>
+
+<iframe id='zeppelinIFrame' width="100%" seamless="seamless" style="border: 0px;"></iframe>
+<script>
+    var $ = jQuery = parent.jQuery;
+    var iframe = document.querySelector('#zeppelinIFrame');
+    var port = "${port}";
+    var publicName = "${publicname}";
+
+
+    $.getJSON('/api/v1/clusters', function (data) {
+        $.getJSON('/api/v1/clusters/' +
+                data['items'][0]['Clusters']['cluster_name'] +
+                '/hosts?fields=Hosts%2Fpublic_host_name%2Chost_components%2FHostRoles%2Fcomponent_name',
+                function (data) {
+                    for (var i in data['items']) {
+                        for (var j in data['items'][i]['host_components']) {
+                            if (data['items'][i]['host_components'][j]['HostRoles']['component_name'] == 'ZEPPELIN_MASTER') {
+                                var url = '//' + data['items'][i]['host_components'][j]['HostRoles']['host_name'] + ':' + port;
+                                iframe.src = url;
+                                iframe.height = window.innerHeight;
+                                return;
+                            }
+                        }
+                    }
+                });
+    });
+
+    $(window).resize(function () {
+        iframe.height = window.innerHeight;
+    });
+</script>
+</body>
+</html>

+ 38 - 0
contrib/views/zeppelin/src/main/resources/WEB-INF/web.xml

@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="ISO-8859-1" ?>
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<web-app xmlns="http://java.sun.com/xml/ns/j2ee"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://java.sun.com/xml/ns/j2ee http://java.sun.com/xml/ns/j2ee/web-app_2_4.xsd"
+         version="2.4">
+
+  <display-name>Zeppelin view servlet</display-name>
+  <description>
+    This is the Zeppelin view servlet application.
+  </description>
+  <servlet>
+    <servlet-name>ZeppelinServlet</servlet-name>
+    <servlet-class>org.apache.ambari.view.zeppelin.ZeppelinServlet</servlet-class>
+  </servlet>
+
+  <servlet-mapping>
+    <servlet-name>ZeppelinServlet</servlet-name>
+    <url-pattern>/</url-pattern>
+  </servlet-mapping>
+</web-app>

+ 48 - 0
contrib/views/zeppelin/src/main/resources/view.xml

@@ -0,0 +1,48 @@
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<view>
+  <name>ZEPPELIN</name>
+  <label>Zeppelin View!</label>
+  <version>1.0.0</version>
+  <build>${env.BUILD_NUMBER}</build>
+  <description>Ambari view for Apache Zeppelin</description>
+
+  <parameter>
+    <name>zeppelin.server.port</name>
+    <description>Zeppelin Http port (example: 9995).</description>
+    <label>Zeppelin Http port</label>
+    <cluster-config>zeppelin-config/zeppelin.server.port</cluster-config>
+    <required>true</required>
+  </parameter>
+  <parameter>
+    <name>zeppelin.host.publicname</name>
+    <description>Zeppelin host name</description>
+    <label>Zeppelin host name</label>
+    <cluster-config>zeppelin-ambari-config/zeppelin.host.publicname</cluster-config>
+    <required>true</required>
+  </parameter>
+
+  <auto-instance>
+    <name>AUTO_ZEPPELIN_INSTANCE</name>
+    <label>Zeppelin View</label>
+    <description>This view instance is auto created when the Zeppelin service is added to a cluster.</description>
+    <stack-id>HDP-2.*</stack-id>
+    <services>
+      <service>ZEPPELIN</service>
+    </services>
+  </auto-instance>
+</view>