
AMBARI-759. Add puppet scripts to the agent for configuring/installing various services and add security aspects to api's and server/agent. (mahadev)

git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/branches/AMBARI-666@1387826 13f79535-47bb-0310-9956-ffa450edef68
Mahadev Konar, 12 years ago
parent commit a30758b5cc
60 files changed, 4587 additions and 76 deletions
  1. AMBARI-666-CHANGES.txt (+4, -0)
  2. ambari-agent/src/main/puppet/manifestloader/site.pp (+48, -0)
  3. ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp (+54, -0)
  4. ambari-agent/src/main/puppet/modules/configgenerator/manifests/init.pp (+23, -0)
  5. ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkForFormat.sh (+62, -0)
  6. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp (+56, -0)
  7. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp (+100, -0)
  8. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp (+77, -0)
  9. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp (+74, -0)
  10. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp (+83, -0)
  11. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp (+243, -0)
  12. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp (+94, -0)
  13. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp (+29, -0)
  14. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp (+75, -0)
  15. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp (+189, -0)
  16. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp (+57, -0)
  17. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp (+28, -0)
  18. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp (+44, -0)
  19. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp (+172, -0)
  20. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp (+118, -0)
  21. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp (+24, -0)
  22. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp (+27, -0)
  23. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp (+27, -0)
  24. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp (+46, -0)
  25. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp (+98, -0)
  26. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp (+94, -0)
  27. ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/capacity-scheduler.xml.erb (+68, -0)
  28. ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb (+25, -0)
  29. ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/core-site.xml.erb (+254, -0)
  30. ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb (+89, -0)
  31. ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb (+37, -0)
  32. ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb (+37, -0)
  33. ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-policy.xml.erb (+134, -0)
  34. ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb (+118, -0)
  35. ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb (+171, -0)
  36. ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-queue-acls.xml.erb (+39, -0)
  37. ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-site.xml.erb (+531, -0)
  38. ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb (+3, -0)
  39. ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb (+20, -0)
  40. ambari-agent/src/main/python/ambari_agent/security.py (+65, -0)
  41. ambari-project/pom.xml (+40, -0)
  42. ambari-server/pom.xml (+40, -0)
  43. ambari-server/src/main/assemblies/server.xml (+6, -0)
  44. ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java (+68, -4)
  45. ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java (+154, -71)
  46. ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java (+2, -0)
  47. ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleDAO.java (+55, -0)
  48. ambari-server/src/main/java/org/apache/ambari/server/orm/dao/UserDAO.java (+55, -0)
  49. ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleEntity.java (+51, -0)
  50. ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UserEntity.java (+94, -0)
  51. ambari-server/src/main/java/org/apache/ambari/server/security/AmbariUserDetailsService.java (+96, -0)
  52. ambari-server/src/main/java/org/apache/ambari/server/security/CertificateManager.java (+164, -0)
  53. ambari-server/src/main/java/org/apache/ambari/server/security/ClientSecurityType.java (+26, -0)
  54. ambari-server/src/main/java/org/apache/ambari/server/security/unsecured/rest/CertificateDownload.java (+47, -0)
  55. ambari-server/src/main/resources/META-INF/persistence.xml (+5, -1)
  56. ambari-server/src/main/resources/pass.txt (+1, -0)
  57. ambari-server/src/main/resources/users.ldif (+35, -0)
  58. ambari-server/src/main/resources/webapp/WEB-INF/spring-security.xml (+52, -0)
  59. ambari-server/src/main/resources/webapp/WEB-INF/web.xml (+33, -0)
  60. ambari-server/src/main/resources/webapp/index.html (+26, -0)

+ 4 - 0
AMBARI-666-CHANGES.txt

@@ -12,6 +12,10 @@ AMBARI-666 branch (unreleased changes)
 
   NEW FEATURES
 
+  AMBARI-759. Add puppet scripts to the agent for configuring/installing
+  various services and add security aspects to api's and server/agent.
+  (mahadev)
+
   AMBARI-749. Complete Java side implementation of bootstrapping agent hosts.
   (mahadev)
 

+ 48 - 0
ambari-agent/src/main/puppet/manifestloader/site.pp

@@ -0,0 +1,48 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+		
+class manifestloader () {
+    file { '/etc/puppet/agent/modules.tgz':
+      ensure => present,
+      source => "puppet:///modules/catalog/modules.tgz",  
+      mode => '0755',
+    }
+
+    exec { 'untar_modules':
+      command => "rm -rf /etc/puppet/agent/modules ; tar zxf /etc/puppet/agent/modules.tgz -C /etc/puppet/agent/ --strip-components 3",
+      path    => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+    } 
+
+    exec { 'puppet_apply':
+      command   => "sh /etc/puppet/agent/modules/puppetApply.sh",
+      timeout   => 1800,
+      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+      logoutput => "true"
+    }
+
+    File['/etc/puppet/agent/modules.tgz'] -> Exec['untar_modules'] -> Exec['puppet_apply']
+}
+
+node default {
+ stage{1 :}
+ class {'manifestloader': stage => 1}
+}
+

+ 54 - 0
ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp

@@ -0,0 +1,54 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+#
+# Generates xml configs from the given key-value hash maps
+#
+# Config file format:
+#
+# <configuration>
+#   <property>
+#     <name>name1</name><value>value1</value>
+#   </property>
+#     ..
+#   <property>
+#     <name>nameN</name><value>valueN</value>
+#   </property>
+# </configuration>
+#
+
+define configgenerator::configfile ($configname=$title, $module, $properties) {
+  $configcontent = inline_template("
+  <configuration>
+  <% properties.each do |key,value| -%>
+    <property>
+      <name><%=key %></name><value><%=value %></value>
+    </property>
+  <% end -%>
+  </configuration>
+  ")
+
+  file { 'config':
+    ensure  => present,
+    content => $configcontent,
+    path    => "/etc/puppet/agent/modules/${module}/templates/${configname}",
+  }
+}
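For reference, a minimal sketch of how this define could be invoked from another module; the resource title, module name, and property key below are illustrative only (the real call site added by this commit is the hdfs-site.xml block in hdp-hadoop/manifests/init.pp further down):

    # illustrative title and property; see the hdfs-site.xml call in hdp-hadoop/manifests/init.pp for the real usage
    configgenerator::configfile { 'example-site.xml':
      module     => 'hdp-hadoop',
      properties => {'fs.default.name' => 'hdfs://namenode.example.com:8020'}
    }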

+ 23 - 0
ambari-agent/src/main/puppet/modules/configgenerator/manifests/init.pp

@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+class configgenerator() {
+}

+ 62 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkForFormat.sh

@@ -0,0 +1,62 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  rm -f ${mark_file}
+  mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+
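A hedged sketch of how this script is expected to be invoked: the positional arguments are the HDFS user, the Hadoop conf dir, the mark dir, and then the comma-separated NameNode data dirs. The values below are illustrative; the real invocation added by this commit is in hdp-hadoop/manifests/namenode/format.pp.

    # illustrative arguments: <hdfs_user> <conf_dir> <mark_dir> <name_dirs>
    sh checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted /hadoop/hdfs/namenode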

+ 56 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp

@@ -0,0 +1,56 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::client(
+  $service_state = $hdp::params::cluster_client_state
+) inherits hdp::params
+{
+  $hdp::params::service_exists['hdp-hadoop::client'] = true
+
+  Hdp-hadoop::Common<||>{service_states +> $service_state}
+
+  if ($hdp::params::use_32_bits_on_slaves == true) {
+    Hdp-hadoop::Package<||>{include_32_bit => true}
+    Hdp-hadoop::Configfile<||>{sizes +> 32}
+  } else {
+    Hdp-hadoop::Package<||>{include_64_bit => true}
+    Hdp-hadoop::Configfile<||>{sizes +> 64}
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['installed_and_configured','uninstalled']) {
+    #adds package, users and directories, and common hadoop configs
+    include hdp-hadoop::initialize
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      $masterHost = $kerberos_adminclient_host[0]
+      hdp::download_keytab { 'hadoop_client_ambari_qa_keytab' :
+        masterhost => $masterHost,
+        keytabdst => "${$keytab_path}/ambari_qa.headless.keytab",
+        keytabfile => 'ambari_qa.headless.keytab',
+        owner => 'ambari_qa',
+        hostnameInPrincipals => 'no'
+      }
+    }
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}

+ 100 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp

@@ -0,0 +1,100 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::datanode(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-hadoop::params 
+{
+
+  $hdp::params::service_exists['hdp-hadoop::datanode'] = true
+
+  Hdp-hadoop::Common<||>{service_states +> $service_state}
+
+  if ($hdp::params::use_32_bits_on_slaves == true) {
+    Hdp-hadoop::Package<||>{include_32_bit => true}
+    Hdp-hadoop::Configfile<||>{sizes +> 32}
+  } else {
+    Hdp-hadoop::Package<||>{include_64_bit => true}
+    Hdp-hadoop::Configfile<||>{sizes +> 64}
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
+    $dfs_data_dir = $hdp-hadoop::params::dfs_data_dir
+  
+    if (($hdp::params::service_exists['hdp-hadoop::namenode'] == true) or ($hdp::params::service_exists['hdp-hadoop::snamenode'] == true)){
+      $a_namenode_on_node = true
+    } else {
+      $a_namenode_on_node = false
+    }
+
+    #adds package, users and directories, and common hadoop configs
+    include hdp-hadoop::initialize
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      $masterHost = $kerberos_adminclient_host[0]
+      hdp::download_keytab { 'datanode_service_keytab' :
+        masterhost => $masterHost,
+        keytabdst => "${$keytab_path}/dn.service.keytab",
+        keytabfile => 'dn.service.keytab',
+        owner => $hdp-hadoop::params::hdfs_user
+      }
+    }
+
+  
+    hdp-hadoop::datanode::create_data_dirs { $dfs_data_dir: 
+      service_state => $service_state
+    }
+
+    if ($a_namenode_on_node == true){
+      $create_pid_dir = false
+      $create_log_dir = false
+    } else {
+      $create_pid_dir = true
+      $create_log_dir = true
+    }
+    
+    hdp-hadoop::service{ 'datanode':
+      ensure         => $service_state,
+      user           => $hdp-hadoop::params::hdfs_user,
+      create_pid_dir => $create_pid_dir,
+      create_log_dir => $create_log_dir
+    }
+    
+    #top level does not need anchors
+    Class['hdp-hadoop'] -> Hdp-hadoop::Service['datanode']
+    Hdp-hadoop::Datanode::Create_data_dirs<||> -> Hdp-hadoop::Service['datanode']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp-hadoop::datanode::create_data_dirs($service_state)
+{
+  $dirs = hdp_array_from_comma_list($name)
+  hdp::directory_recursive_create { $dirs :
+    owner => $hdp-hadoop::params::hdfs_user,
+    mode => '0750',
+    service_state => $service_state,
+    force => true
+  }
+}

+ 77 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp

@@ -0,0 +1,77 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+define hdp-hadoop::hdfs::copyfromlocal(
+  $service_state,
+  $owner = unset,
+  $group = unset,
+  $recursive_chown = false,
+  $mode = undef,
+  $recursive_chmod = false,
+  $dest_dir = undef 
+) 
+{
+ 
+  if ($service_state == 'running') {
+    $copy_cmd = "fs -copyFromLocal ${name} ${dest_dir}"
+    hdp-hadoop::exec-hadoop { $copy_cmd:
+      command => $copy_cmd,
+      unless => "hadoop fs -ls ${dest_dir} >/dev/null 2>&1",
+      user => $owner
+    }
+    if ($owner == unset) {
+      $chown = ""
+    } else {
+      if ($group == unset) {
+        $chown = $owner
+      } else {
+        $chown = "${owner}:${group}"
+     } 
+    }  
+ 
+    if (chown != "") {
+      #TODO: see if there is a good 'unless test'
+      if ($recursive_chown == true) {
+        $chown_cmd = "fs -chown -R ${chown} ${dest_dir}"
+      } else {
+        $chown_cmd = "fs -chown ${chown} ${dest_dir}"
+      }
+      hdp-hadoop::exec-hadoop {$chown_cmd :
+        command => $chown_cmd,
+        user => $owner
+      }
+      Hdp-hadoop::Exec-hadoop[$copy_cmd] -> Hdp-hadoop::Exec-hadoop[$chown_cmd]
+    }
+  
+    if ($mode != undef) {
+      #TODO: see if there is a good 'unless test'
+      if ($recursive_chmod == true) {
+        $chmod_cmd = "fs -chmod -R ${mode} ${dest_dir}"
+      } else {
+        $chmod_cmd = "fs -chmod ${mode} ${dest_dir}"
+      }
+      hdp-hadoop::exec-hadoop {$chmod_cmd :
+        command => $chmod_cmd,
+        user => $owner
+      }
+      Hdp-hadoop::Exec-hadoop[$copy_cmd] -> Hdp-hadoop::Exec-hadoop[$chmod_cmd]
+    }
+  }       
+}

+ 74 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp

@@ -0,0 +1,74 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#TODO: unset should be changed to undef; just to be consistent
+define hdp-hadoop::hdfs::directory(
+  $service_state = 'running',
+  $owner = unset,
+  $group = unset,
+  $recursive_chown = false,
+  $mode = undef,
+  $recursive_chmod = false
+) 
+{
+ 
+  if ($service_state == 'running') {
+    $mkdir_cmd = "fs -mkdir ${name}"
+    hdp-hadoop::exec-hadoop { $mkdir_cmd:
+      command => $mkdir_cmd,
+      unless => "hadoop fs -ls ${name} >/dev/null 2>&1"
+    }
+    if ($owner == unset) {
+      $chown = ""
+    } else {
+      if ($group == unset) {
+        $chown = $owner
+      } else {
+        $chown = "${owner}:${group}"
+     } 
+    }  
+ 
+    if (chown != "") {
+      #TODO: see if there is a good 'unless test'
+      if ($recursive_chown == true) {
+        $chown_cmd = "fs -chown -R ${chown} ${name}"
+      } else {
+        $chown_cmd = "fs -chown ${chown} ${name}"
+      }
+      hdp-hadoop::exec-hadoop {$chown_cmd :
+        command => $chown_cmd
+      }
+      Hdp-hadoop::Exec-hadoop[$mkdir_cmd] -> Hdp-hadoop::Exec-hadoop[$chown_cmd]
+    }
+  
+    if ($mode != undef) {
+      #TODO: see if there is a good 'unless test'
+      if ($recursive_chmod == true) {
+        $chmod_cmd = "fs -chmod -R ${mode} ${name}"
+      } else {
+        $chmod_cmd = "fs -chmod ${mode} ${name}"
+      }
+      hdp-hadoop::exec-hadoop {$chmod_cmd :
+        command => $chmod_cmd
+      }
+      Hdp-hadoop::Exec-hadoop[$mkdir_cmd] -> Hdp-hadoop::Exec-hadoop[$chmod_cmd]
+    }
+  }       
+}

+ 83 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp

@@ -0,0 +1,83 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::hdfs::service_check()
+{
+  $unique = hdp_unique_id_and_date()
+  $dir = '/tmp'
+  $tmp_file = "${dir}/${unique}"
+
+  $safemode_command = "dfsadmin -safemode get | grep OFF"
+
+  $create_dir_cmd = "fs -mkdir ${dir} ; hadoop fs -chmod -R 777 ${dir}"
+  $test_dir_exists = "hadoop fs -test -e ${dir}" #TODO: may fix up fact that test needs explicit hadoop while omamnd does not
+  $cleanup_cmd = "fs -rm ${tmp_file}"
+  #cleanup put below to handle retries; if retrying there will be a stale file that needs cleanup; exit code is fn of second command
+  $create_file_cmd = "${cleanup_cmd}; hadoop fs -put /etc/passwd ${tmp_file}" #TODO: inconsistent that second command needs hadoop
+  $test_cmd = "fs -test -e ${tmp_file}"
+
+  anchor { 'hdp-hadoop::hdfs::service_check::begin':}
+
+  hdp-hadoop::exec-hadoop { 'hdfs::service_check::check_safemode':
+    command   => $safemode_command,
+    tries     => 40,
+    try_sleep => 15,
+    logoutput => true,
+    user      => $hdp::params::smokeuser,
+    require   => Anchor['hdp-hadoop::hdfs::service_check::begin']
+  }
+
+  hdp-hadoop::exec-hadoop { 'hdfs::service_check::create_dir':
+    command   => $create_dir_cmd,
+    unless    => $test_dir_exists,
+    tries     => 3,
+    try_sleep => 5,
+    user      => $hdp::params::smokeuser,
+    require   => Hdp-hadoop::Exec-hadoop['hdfs::service_check::check_safemode']
+  }
+
+  hdp-hadoop::exec-hadoop { 'hdfs::service_check::create_file':
+    command   => $create_file_cmd,
+    tries     => 3,
+    try_sleep => 5,
+    user      => $hdp::params::smokeuser,
+    require   => Hdp-hadoop::Exec-hadoop['hdfs::service_check::create_dir'],
+    notify    => Hdp-hadoop::Exec-hadoop['hdfs::service_check::test']
+  }
+
+  hdp-hadoop::exec-hadoop { 'hdfs::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    user      => $hdp::params::smokeuser,
+    require     => Hdp-hadoop::Exec-hadoop['hdfs::service_check::create_file'],
+    #notify      => Hdp-hadoop::Exec-hadoop['hdfs::service_check::cleanup']  #TODO: put in after testing
+    before      => Anchor['hdp-hadoop::hdfs::service_check::end'] #TODO: remove after testing
+  }
+
+   #TODO: put in after testing
+ #  hdp-hadoop::exec-hadoop { 'hdfs::service_check::cleanup':
+ #   command     => $cleanup_cmd,
+ #   refreshonly => true,
+ #   require     => Hdp-hadoop::Exec-hadoop['hdfs::service_check::test'],
+ #   before      => Anchor['hdp-hadoop::hdfs::service_check::end']
+  #}
+  anchor{ 'hdp-hadoop::hdfs::service_check::end':}
+
+}

+ 243 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp

@@ -0,0 +1,243 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#singleton for use with <||> form so that namenode, datanode, etc can pass state to hdp-hadoop and still use include
+define hdp-hadoop::common(
+  $service_states = []
+)
+{
+  class { 'hdp-hadoop':
+    service_states => $service_states    
+  }
+  anchor{'hdp-hadoop::common::begin':} -> Class['hdp-hadoop'] -> anchor{'hdp-hadoop::common::end':} 
+}
+
+class hdp-hadoop::initialize()
+{
+  if ($hdp::params::component_exists['hdp-hadoop'] == true) {
+  } else {
+    $hdp::params::component_exists['hdp-hadoop'] = true
+  }
+  hdp-hadoop::common { 'common':}
+  anchor{'hdp-hadoop::initialize::begin':} -> Hdp-hadoop::Common['common'] -> anchor{'hdp-hadoop::initialize::end':}
+}
+
+class hdp-hadoop(
+  $service_states  = []
+)
+{
+  include configgenerator
+
+  configgenerator::configfile { 'hdfs-site.xml':
+    module => 'hdp-hadoop',
+    properties => {'dfs.name.dir' => '<%=scope.function_hdp_template_var("dfs_name_dir")%>',
+      'dfs.support.append' => '<%=scope.function_hdp_template_var("dfs_support_append")%>',
+      'dfs.webhdfs.enabled' => '<%=scope.function_hdp_template_var("dfs_webhdfs_enabled")%>',
+      'dfs.datanode.failed.volumes.tolerated' => '<%=scope.function_hdp_template_var("dfs_datanode_failed_volume_tolerated")%>',
+      'dfs.block.local-path-access.user' => '<%=scope.function_hdp_template_var("dfs_block_local_path_access_user")%>',
+      'dfs.data.dir' => '<%=scope.function_hdp_template_var("dfs_data_dir")%>'}
+  }
+  
+  include hdp-hadoop::params
+  $hadoop_config_dir = $hdp-hadoop::params::conf_dir
+  $mapred_user = $hdp-hadoop::params::mapred_user  
+  $hdfs_user = $hdp-hadoop::params::hdfs_user  
+
+  anchor{'hdp-hadoop::begin':} 
+  anchor{'hdp-hadoop::end':} 
+
+  if ('uninstalled' in $service_states) {
+    hdp-hadoop::package { 'hadoop':
+      ensure => 'uninstalled'
+    }
+
+    hdp::directory_recursive_create { $hadoop_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> -> Hdp::Directory_recursive_create[$hadoop_config_dir] -> Anchor['hdp-hadoop::end']
+  } else {
+    
+    hdp-hadoop::package { 'hadoop':}
+
+
+    hdp::directory_recursive_create { $hadoop_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+ 
+    hdp::user{ $hdfs_user:}
+    hdp::user { $mapred_user:}
+
+    $logdirprefix = $hdp-hadoop::params::hadoop_logdirprefix
+    hdp::directory_recursive_create { $logdirprefix: 
+        owner => 'root'
+    }
+    $piddirprefix = $hdp-hadoop::params::hadoop_piddirprefix
+    hdp::directory_recursive_create { $piddirprefix: 
+        owner => 'root'
+    }
+ 
+    #taskcontroller.cfg properties conditional on security
+    if ($hdp::params::security_enabled == true) {
+      file { "${hdp::params::hadoop_bin}/task-controller":
+        owner   => 'root',
+        group   => $hdp::params::hadoop_user_group,
+        mode    => '6050',
+        require => Hdp-hadoop::Package['hadoop'],
+        before  => Anchor['hdp-hadoop::end']
+      }
+      $tc_owner = 'root'
+      $tc_mode = '0400'
+    } else {
+      $tc_owner = $hdfs_user
+      $tc_mode = undef
+    }
+    hdp-hadoop::configfile { 'taskcontroller.cfg' :
+      tag   => 'common',
+      owner => $tc_owner,
+      mode  => $tc_mode
+    }
+
+    $template_files = ['hadoop-env.sh','core-site.xml','hadoop-policy.xml','health_check','capacity-scheduler.xml','commons-logging.properties','log4j.properties','mapred-queue-acls.xml','slaves']
+    hdp-hadoop::configfile { $template_files:
+      tag   => 'common', 
+      owner => $hdfs_user
+    }
+    
+    hdp-hadoop::configfile { 'hadoop-metrics2.properties' : 
+      tag   => 'common', 
+      owner => $hdfs_user,
+    }
+
+    hdp-hadoop::configfile { 'mapred-site.xml': 
+      tag => 'common', 
+      owner => $mapred_user
+    }
+
+    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> ->  Hdp::Directory_recursive_create[$hadoop_config_dir] ->  Hdp::User<|title == $hdfs_user or title == $mapred_user|> 
+    -> Hdp-hadoop::Configfile<|tag == 'common'|> -> Anchor['hdp-hadoop::end']
+    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$logdirprefix] -> Anchor['hdp-hadoop::end']
+    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$piddirprefix] -> Anchor['hdp-hadoop::end']
+  }
+}
+
+class hdp-hadoop::enable-ganglia()
+{
+  Hdp-hadoop::Configfile<|title  == 'hadoop-metrics2.properties'|>{template_tag => 'GANGLIA'}
+}
+
+###config file helper
+define hdp-hadoop::configfile(
+  $owner = undef,
+  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir,
+  $mode = undef,
+  $namenode_host = undef,
+  $jtnode_host = undef,
+  $snamenode_host = undef,
+  $template_tag = undef,
+  $size = undef, #TODO: deprecate
+  $sizes = []
+) 
+{
+  #TODO: may need to be fixed 
+  if ($jtnode_host == undef) {
+    $calc_jtnode_host = $namenode_host
+  } else {
+    $calc_jtnode_host = $jtnode_host 
+  }
+ 
+  #only set 32 if there is a 32 bit component and no 64 bit components
+  if (64 in $sizes) {
+    $common_size = 64
+  } elsif (32 in $sizes) {
+    $common_size = 32
+  } else {
+    $common_size = 64
+  }
+  
+  hdp::configfile { "${hadoop_conf_dir}/${name}":
+    component      => 'hadoop',
+    owner          => $owner,
+    mode           => $mode,
+    namenode_host  => $namenode_host,
+    snamenode_host => $snamenode_host,
+    jtnode_host    => $calc_jtnode_host,
+    template_tag   => $template_tag,
+    size           => $common_size
+  }
+}
+
+#####
+define hdp-hadoop::exec-hadoop(
+  $command,
+  $unless = undef,
+  $refreshonly = undef,
+  $echo_yes = false,
+  $kinit_override = false,
+  $tries = 1,
+  $timeout = 900,
+  $try_sleep = undef,
+  $user = undef,
+  $logoutput = undef
+)
+{
+  include hdp-hadoop::params
+  $security_enabled = $hdp::params::security_enabled
+  $conf_dir = $hdp-hadoop::params::conf_dir
+  $hdfs_user = $hdp-hadoop::params::hdfs_user
+
+  if ($user == undef) {
+    $run_user = $hdfs_user
+  } else {
+    $run_user = $user
+  }
+
+  if (($security_enabled == true) and ($kinit_override == false)) {
+    #TODO: may figure out so we don't need to call kinit if auth is already in the cache
+    if ($run_user in [$hdfs_user,'root']) {
+      $keytab = "${hdp-hadoop::params::keytab_path}/${hdfs_user}.headless.keytab"
+      $principal = $hdfs_user
+    } else {
+      $keytab = "${hdp-hadoop::params::keytab_path}/${user}.headless.keytab" 
+      $principal = $user
+    }
+    $kinit_if_needed = "/usr/kerberos/bin/kinit  -kt ${keytab} ${principal}; "
+  } else {
+    $kinit_if_needed = ""
+  }
+ 
+  if ($echo_yes == true) {
+    $cmd = "${kinit_if_needed}yes Y | hadoop --config ${conf_dir} ${command}"
+  } else {
+    $cmd = "${kinit_if_needed}hadoop --config ${conf_dir} ${command}"
+  }
+
+  hdp::exec { $cmd:
+    command     => $cmd,
+    user        => $run_user,
+    unless      => $unless,
+    refreshonly => $refreshonly,
+    tries       => $tries,
+    timeout     => $timeout,
+    try_sleep   => $try_sleep,
+    logoutput   => $logoutput
+  }
+}
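A minimal usage sketch for the exec-hadoop wrapper defined above, which prepends an optional kinit plus "hadoop --config <conf_dir>" to the given command. The resource title below is illustrative; the command, user, and retry arguments mirror the namenode service_check call site added later in this commit.

    # illustrative title; arguments mirror hdp-hadoop::namenode::service_check below
    hdp-hadoop::exec-hadoop { 'example::list_root':
      command   => 'dfs -ls /',
      user      => $hdp::params::smokeuser,
      tries     => 3,
      try_sleep => 5
    }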

+ 94 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp

@@ -0,0 +1,94 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::jobtracker(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-hadoop::params
+{
+  $hdp::params::service_exists['hdp-hadoop::jobtracker'] = true
+  Hdp-hadoop::Common<||>{service_states +> $service_state}
+  Hdp-hadoop::Package<||>{include_64_bit => true}
+  Hdp-hadoop::Configfile<||>{sizes +> 64}
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
+    $mapred_user = $hdp-hadoop::params::mapred_user
+    $mapred_local_dir = $hdp-hadoop::params::mapred_local_dir 
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp-hadoop::initialize
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      $masterHost = $kerberos_adminclient_host[0]
+      hdp::download_keytab { 'jobtracker_service_keytab' :
+        masterhost => $masterHost,
+        keytabdst => "${$keytab_path}/jt.service.keytab",
+        keytabfile => 'jt.service.keytab',
+        owner => $hdp-hadoop::params::mapred_user
+      }
+    }
+     
+    hdp-hadoop::jobtracker::create_local_dirs { $mapred_local_dir: 
+      service_state => $service_state
+    }
+
+    #TODO: cleanup 
+    Hdp-Hadoop::Configfile<||>{jtnode_host => $hdp::params::host_address}
+
+    #TODO: do we keep precondition here?
+    if ($service_state == 'running' and $hdp-hadoop::params::use_preconditions == true) {
+      class { 'hdp-hadoop::hdfs::service_check':
+        before => Hdp-hadoop::Service['jobtracker'],
+        require => Class['hdp-hadoop']
+      }
+    }
+
+    hdp-hadoop::service{ 'jobtracker':
+      ensure       => $service_state,
+      user         => $mapred_user
+    }
+  
+    hdp-hadoop::service{ 'historyserver':
+      ensure         => $service_state,
+      user           => $mapred_user,
+      create_pid_dir => false,
+      create_log_dir => false
+    }
+
+    #top level does not need anchors
+    Class['hdp-hadoop'] -> Hdp-hadoop::Service['jobtracker'] -> Hdp-hadoop::Service['historyserver']
+    Hdp-hadoop::Jobtracker::Create_local_dirs<||> -> Hdp-hadoop::Service['jobtracker']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp-hadoop::jobtracker::create_local_dirs($service_state)
+{
+    $dirs = hdp_array_from_comma_list($name)
+    hdp::directory_recursive_create { $dirs :
+      owner => $hdp-hadoop::params::mapred_user,
+      mode => '0755',
+      service_state => $service_state,
+      force => true
+    }
+}

+ 29 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp

@@ -0,0 +1,29 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::jobtracker::service_check()
+{
+  hdp-hadoop::exec-hadoop { 'jobtracker::service_check':
+    command   => 'job -list',
+    tries     => 3,
+    try_sleep => 5,
+    user => $hdp::params::smokeuser
+  }
+}

+ 75 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp

@@ -0,0 +1,75 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::mapred::service_check() 
+{
+  $smoke_test_user = $hdp::params::smokeuser
+  $jar_location = $hdp::params::hadoop_jar_location
+  $input_file = 'mapredsmokeinput'
+  $output_file = "mapredsmokeoutput"
+
+  $cleanup_cmd = "dfs -rmr ${output_file} ${input_file}"
+  #cleanup put below to handle retries; if retrying there will be a stale file that needs cleanup; exit code is fn of second command
+  $create_file_cmd = "$cleanup_cmd ; hadoop dfs -put /etc/passwd ${input_file} " #TODO: inconsistent that second command needs hadoop
+  $test_cmd = "fs -test -e ${output_file}" 
+  $run_wordcount_job = "jar ${jar_location}/hadoop-examples.jar  wordcount ${input_file} ${output_file}"
+  
+  anchor { 'hdp-hadoop::mapred::service_check::begin':}
+
+  hdp-hadoop::exec-hadoop { 'mapred::service_check::create_file':
+    command   => $create_file_cmd,
+    tries     => 1,
+    try_sleep => 5,
+    require   => Anchor['hdp-hadoop::mapred::service_check::begin'],
+  #  notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'],
+    user      => $smoke_test_user
+  }
+
+  hdp-hadoop::exec-hadoop { 'mapred::service_check::run_wordcount':
+    command   => $run_wordcount_job,
+    tries     => 1,
+    try_sleep => 5,
+    require   => Hdp-hadoop::Exec-hadoop['mapred::service_check::create_file'],
+    notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::test'],
+    user      => $smoke_test_user,
+    logoutput => "true"
+  }
+
+#  exec { 'runjob':
+#    command   => "hadoop jar ${jar_location}/hadoop-examples.jar  wordcount ${input_file} ${output_file}",
+#    tries     => 1,
+#    try_sleep => 5,
+#    require   => Hdp-hadoop::Exec-hadoop['mapred::service_check::create_file'],
+#    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+#    notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::test'],
+#    logoutput => "true",
+#    user      => $smoke_test_user
+#  }
+
+  hdp-hadoop::exec-hadoop { 'mapred::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    require     => Hdp-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'],
+    before      => Anchor['hdp-hadoop::mapred::service_check::end'], #TODO: remove after testing
+    user        => $smoke_test_user
+  }
+  
+  anchor{ 'hdp-hadoop::mapred::service_check::end':}
+}

+ 189 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp

@@ -0,0 +1,189 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::namenode(
+  $service_state = $hdp::params::cluster_service_state,
+  $slave_hosts = [],
+  $format = true,
+  $opts = {}
+) inherits hdp-hadoop::params
+{
+  $hdp::params::service_exists['hdp-hadoop::namenode'] = true
+
+  Hdp-hadoop::Common<||>{service_states +> $service_state}
+  Hdp-hadoop::Package<||>{include_64_bit => true}
+  Hdp-hadoop::Configfile<||>{sizes +> 64}
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
+    $dfs_name_dir = $hdp-hadoop::params::dfs_name_dir
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp-hadoop::initialize
+
+    if ( ($service_state == 'installed_and_configured') and 
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      $masterHost = $kerberos_adminclient_host[0]
+      hdp::download_keytab { 'namenode_service_keytab' :
+        masterhost => $masterHost,
+        keytabdst => "${$keytab_path}/nn.service.keytab",
+        keytabfile => 'nn.service.keytab',
+        owner => $hdp-hadoop::params::hdfs_user
+      }
+      hdp::download_keytab { 'namenode_hdfs_headless_keytab' :   
+        masterhost => $masterHost,
+        keytabdst => "${$keytab_path}/hdfs.headless.keytab",
+        keytabfile => 'hdfs.headless.keytab', 
+        owner => $hdp-hadoop::params::hdfs_user, 
+        hostnameInPrincipals => 'no'
+      }
+      hdp::download_keytab { 'namenode_spnego_keytab' :   
+        masterhost => $masterHost,
+        keytabdst => "${$keytab_path}/spnego.service.keytab",
+        keytabfile => 'spnego.service.keytab', 
+        owner => $hdp-hadoop::params::hdfs_user, 
+        mode => '0440',
+        group => 'hadoop'
+      }
+    }
+ 
+    hdp-hadoop::namenode::create_name_dirs { $dfs_name_dir: 
+      service_state => $service_state
+    }
+   
+    Hdp-Hadoop::Configfile<||>{namenode_host => $hdp::params::host_address}
+    Hdp::Configfile<||>{namenode_host => $hdp::params::host_address} #for components other than hadoop (e.g., hbase) 
+  
+    if ($service_state == 'running' and $format == true) {
+      class {'hdp-hadoop::namenode::format' : }
+    }
+
+    hdp-hadoop::service{ 'namenode':
+      ensure       => $service_state,
+      user         => $hdp-hadoop::params::hdfs_user,
+      initial_wait => hdp_option_value($opts,'wait')
+    }
+
+    hdp-hadoop::namenode::create_app_directories { 'create_app_directories' :
+       service_state => $service_state
+    }
+
+    #top level does not need anchors
+    Class['hdp-hadoop'] ->  Hdp-hadoop::Service['namenode']
+    Hdp-hadoop::Namenode::Create_name_dirs<||> -> Hdp-hadoop::Service['namenode'] 
+    Hdp-hadoop::Service['namenode'] -> Hdp-hadoop::Namenode::Create_app_directories<||>
+    if ($service_state == 'running' and $format == true) {
+      Class['hdp-hadoop'] -> Class['hdp-hadoop::namenode::format'] -> Hdp-hadoop::Service['namenode']
+      Hdp-hadoop::Namenode::Create_name_dirs<||> -> Class['hdp-hadoop::namenode::format']
+    } 
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp-hadoop::namenode::create_name_dirs($service_state)
+{
+  $dirs = hdp_array_from_comma_list($name)
+  hdp::directory_recursive_create { $dirs :
+    owner => $hdp-hadoop::params::hdfs_user,
+    mode => '0755',
+    service_state => $service_state,
+    force => true
+  }
+}
+
+define hdp-hadoop::namenode::create_app_directories($service_state)
+{
+  if ($service_state == 'running') {
+    $smoke_test_user = $hdp::params::smokeuser
+    hdp-hadoop::hdfs::directory{ "/user/${smoke_test_user}":
+      service_state => $service_state,
+      owner => $smoke_test_user,
+      mode  => '770',
+      recursive_chmod => true
+    }
+   
+    hdp-hadoop::hdfs::directory{ "/tmp" :
+      service_state => $service_state,
+      owner => $hdp-hadoop::params::hdfs_user,
+      mode => '777'
+    }
+
+    hdp-hadoop::hdfs::directory{ '/mapred' :
+      service_state => $service_state,
+      owner         => $hdp-hadoop::params::mapred_user
+    }
+    hdp-hadoop::hdfs::directory{ '/mapred/system' :
+      service_state => $service_state,
+      owner         => $hdp-hadoop::params::mapred_user
+    }
+    Hdp-hadoop::Hdfs::Directory['/mapred'] -> Hdp-hadoop::Hdfs::Directory['/mapred/system']
+
+    if ($hdp::params::hbase_master_host != "") {
+      $hdfs_root_dir = $hdp::params::hbase_hdfs_root_dir
+      hdp-hadoop::hdfs::directory { $hdfs_root_dir:
+        owner         => $hdp::params::hbase_user,
+        service_state => $service_state
+      }
+    }
+
+    if ($hdp::params::hive_server_host != "") {
+      $hive_user = $hdp::params::hive_user
+
+      hdp-hadoop::hdfs::directory{ '/apps/hive/warehouse':
+        service_state   => $service_state,
+        owner            => $hive_user,
+        mode             => '777',
+        recursive_chmod  => true
+      }
+      hdp-hadoop::hdfs::directory{ "/user/${hive_user}":
+        service_state => $service_state,
+        owner         => $hive_user
+      }
+    }
+
+    if ($hdp::params::oozie_server != "") {
+      $oozie_user = $hdp::params::oozie_user
+      hdp-hadoop::hdfs::directory{ '/user/oozie':
+        service_state => $service_state,
+        owner => $oozie_user,
+        mode  => '770',
+        recursive_chmod => true
+      }
+    }
+    
+    if ($hdp::params::templeton_server_host != "") {
+      $templeton_user = $hdp::params::templeton_user
+      hdp-hadoop::hdfs::directory{ '/user/templeton':
+        service_state => $service_state,
+        owner => $templeton_user,
+        mode  => '755',
+        recursive_chmod => true
+      }
+
+      hdp-hadoop::hdfs::directory{ '/apps/templeton':
+        service_state => $service_state,
+        owner => $templeton_user,
+        mode  => '755',
+        recursive_chmod => true
+      }
+    }
+  }
+}

+ 57 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp

@@ -0,0 +1,57 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::namenode::format(
+  $force = false
+)
+{
+  $mark_dir = $hdp-hadoop::params::namenode_formatted_mark_dir
+  $dfs_name_dir = $hdp-hadoop::params::dfs_name_dir
+  $hdfs_user = $hdp::params::hdfs_user
+  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir
+
+  if ($force == true) {
+      hdp-hadoop::exec-hadoop { 'namenode -format' :
+      command => 'namenode -format',
+      kinit_override => true,
+      notify  => Hdp::Exec['set namenode mark']
+    }
+  } else {
+      file { '/tmp/checkForFormat.sh':
+      ensure => present,
+      source => "puppet:///modules/hdp-hadoop/checkForFormat.sh",
+      mode => '0755'
+    }
+
+    exec { '/tmp/checkForFormat.sh':
+      command   => "sh /tmp/checkForFormat.sh ${hdfs_user} ${hadoop_conf_dir} ${mark_dir} ${dfs_name_dir} ",
+      unless   => "test -d ${mark_dir}",
+      require   => File['/tmp/checkForFormat.sh'],
+      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+      logoutput => "true",
+      notify   => Hdp::Exec['set namenode mark']
+    }
+  }
+
+  hdp::exec { 'set namenode mark' :
+    command     => "mkdir -p ${mark_dir}",
+    refreshonly => true
+  }
+}

+ 28 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp

@@ -0,0 +1,28 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::namenode::service_check()
+{
+  hdp-hadoop::exec-hadoop { 'namenode::service_check':
+    command   => 'dfs -ls /',
+    tries     => 3,
+    try_sleep => 5
+  }
+}

+ 44 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp

@@ -0,0 +1,44 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#singleton, but using define so can use collections to override params
+define hdp-hadoop::package(
+  $ensure = 'present',
+  $include_32_bit = false,
+  $include_64_bit = false
+)
+{
+  #just use 32 if it's specifically requested and there are no 64 bit requests
+  if ($include_32_bit == true) and ($include_64_bit != true) {
+    $size = 32
+  } else  {
+    $size = 64
+  }
+  $package = "hadoop ${size}"
+  $lzo_enabled = $hdp::params::lzo_enabled
+
+  hdp::package{ $package:
+    ensure       => $ensure,
+    package_type => 'hadoop',
+    size         => $size,
+    lzo_needed   => $lzo_enabled
+  }
+  anchor{ 'hdp-hadoop::package::helper::begin': } -> Hdp::Package[$package] -> anchor{ 'hdp-hadoop::package::helper::end': }
+}

+ 172 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp

@@ -0,0 +1,172 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::params(
+) inherits hdp::params 
+{
+
+  ##TODO: for testing in masterless mode
+  $use_preconditions = false
+  ####  
+  $conf_dir = $hdp::params::hadoop_conf_dir 
+
+  ####### users
+
+  $mapred_user = $hdp::params::mapred_user
+  $hdfs_user = $hdp::params::hdfs_user
+  
+  ##### security related
+  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
+ 
+  if ($hdp::params::security_enabled == true) {
+    $enable_security_authorization = true
+    $security_type = "kerberos"
+    $task_controller = "org.apache.hadoop.mapred.LinuxTaskController"
+    $dfs_datanode_address = 1019
+    $dfs_datanode_http_address = 1022
+  } else {
+    $enable_security_authorization = false
+    $security_type = "simple"
+    $task_controller = "org.apache.hadoop.mapred.DefaultTaskController"
+    $dfs_datanode_address = 50010
+    $dfs_datanode_http_address = 50075
+  }
+
+  ### hadoop-env
+  
+  $dtnode_heapsize = hdp_default("hadoop/hadoop-env/dtnode_heapsize","1024m")
+  $ttnode_heapsize = hdp_default("hadoop/hadoop-env/ttnode_heapsize","1024m")
+
+  $hadoop_heapsize = hdp_default("hadoop/hadoop-env/hadoop_heapsize","1024m")
+
+  $hadoop_logdirprefix = hdp_default("hadoop/hadoop-env/hadoop_logdirprefix","/var/log/hadoop")
+
+  $hadoop_piddirprefix = hdp_default("hadoop/hadoop-env/hadoop_piddirprefix","/var/run/hadoop")
+  $run_dir = $hadoop_piddirprefix
+
+  $namenode_formatted_mark_dir = "${run_dir}/hdfs/namenode/formatted/"
+
+  $jtnode_heapsize = hdp_default("hadoop/hadoop-env/jtnode_heapsize","1024m")
+
+  $jtnode_opt_maxnewsize = hdp_default("hadoop/hadoop-env/jtnode_opt_maxnewsize","200m")
+
+  $jtnode_opt_newsize = hdp_default("hadoop/hadoop-env/jtnode_opt_newsize","200m")
+
+  $namenode_heapsize = hdp_default("hadoop/hadoop-env/namenode_heapsize","1024m")
+
+  $namenode_opt_maxnewsize = hdp_default("hadoop/hadoop-env/namenode_opt_maxnewsize","640m")
+
+  $namenode_opt_newsize = hdp_default("hadoop/hadoop-env/namenode_opt_newsize","640m")
+  
+  ### compression related
+  if (($hdp::params::lzo_enabled == true) and ($hdp::params::snappy_enabled == true)) {
+    $mapred_compress_map_output = true
+    $compression_codecs =  "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec"
+    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
+  } elsif ($hdp::params::snappy_enabled == true) {
+    $mapred_compress_map_output = true
+    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec" 
+    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
+  } elsif ($hdp::params::lzo_enabled == true) {
+    $mapred_compress_map_output = true
+    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec"
+    $mapred_map_output_compression_codec = "com.hadoop.compression.lzo.LzoCodec"
+  } else { 
+    $mapred_compress_map_output = false
+    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec"
+    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.DefaultCodec"
+  }
+
+  ### core-site
+  $fs_checkpoint_dir = hdp_default("hadoop/core-site/fs_checkpoint_dir","/tmp/dfs/namesecondary")
+
+  $proxyuser_group = hdp_default("hadoop/core-site/proxyuser_group","users")
+
+  ### hdfs-site
+  $datanode_du_reserved = hdp_default("hadoop/hdfs-site/datanode_du_reserved",1073741824)
+
+  $dfs_block_local_path_access_user = hdp_default("hadoop/hdfs-site/dfs_block_local_path_access_user","hbase")
+
+  $dfs_data_dir = $hdp::params::dfs_data_dir
+
+  $dfs_datanode_data_dir_perm = hdp_default("hadoop/hdfs-site/dfs_datanode_data_dir_perm",750)
+
+  $dfs_datanode_failed_volume_tolerated = hdp_default("hadoop/hdfs-site/dfs_datanode_failed_volume_tolerated",0)
+
+  $dfs_exclude = hdp_default("hadoop/hdfs-site/dfs_exclude","dfs.exclude")
+
+  $dfs_include = hdp_default("hadoop/hdfs-site/dfs_include","dfs.include")
+  
+  $dfs_name_dir = hdp_default("hadoop/hdfs-site/dfs_name_dir","/tmp/hadoop-hdfs/dfs/name")
+  
+  $dfs_replication = hdp_default("hadoop/hdfs-site/dfs_replication",3)
+
+  $dfs_support_append = hdp_default("hadoop/hdfs-site/dfs_support_append",true)
+
+  $dfs_webhdfs_enabled = hdp_default("hadoop/hdfs-site/dfs_webhdfs_enabled","false")
+
+
+ ######### mapred #######
+   ### mapred-site
+
+  $mapred_system_dir = '/mapred/system'
+
+  $io_sort_mb = hdp_default("hadoop/mapred-site/io_sort_mb","200")
+
+  $io_sort_spill_percent = hdp_default("hadoop/mapred-site/io_sort_spill_percent","0.9")
+
+  $mapred_child_java_opts_sz = hdp_default("hadoop/mapred-site/mapred_child_java_opts_sz","-Xmx768m")
+
+  $mapred_cluster_map_mem_mb = hdp_default("hadoop/mapred-site/mapred_cluster_map_mem_mb","-1")
+
+  $mapred_cluster_max_map_mem_mb = hdp_default("hadoop/mapred-site/mapred_cluster_max_map_mem_mb","-1")
+
+  $mapred_cluster_max_red_mem_mb = hdp_default("hadoop/mapred-site/mapred_cluster_max_red_mem_mb","-1")
+
+  $mapred_cluster_red_mem_mb = hdp_default("hadoop/mapred-site/mapred_cluster_red_mem_mb","-1")
+
+  $mapred_hosts_exclude = hdp_default("hadoop/mapred-site/mapred_hosts_exclude","mapred.exclude")
+
+  $mapred_hosts_include = hdp_default("hadoop/mapred-site/mapred_hosts_include","mapred.include")
+
+  $mapred_job_map_mem_mb = hdp_default("hadoop/mapred-site/mapred_job_map_mem_mb","-1")
+
+  $mapred_job_red_mem_mb = hdp_default("hadoop/mapred-site/mapred_job_red_mem_mb","-1")
+
+  $mapred_jobstatus_dir = hdp_default("hadoop/mapred-site/mapred_jobstatus_dir","file:////mapred/jobstatus")
+
+  $mapred_local_dir = hdp_default("hadoop/mapred-site/mapred_local_dir","/tmp/hadoop-mapred/mapred/local")
+   
+  $mapred_map_tasks_max = hdp_default("hadoop/mapred-site/mapred_map_tasks_max",4)
+
+  $mapred_red_tasks_max = hdp_default("hadoop/mapred-site/mapred_red_tasks_max",4)
+
+  $mapreduce_userlog_retainhours = hdp_default("hadoop/mapred-site/mapreduce_userlog_retainhours",24)
+
+  $maxtasks_per_job = hdp_default("hadoop/mapred-site/maxtasks_per_job","-1")
+
+  $scheduler_name = hdp_default("hadoop/mapred-site/scheduler_name","org.apache.hadoop.mapred.CapacityTaskScheduler")
+
+  #### health_check
+
+  $security_enabled = $hdp::params::security_enabled
+
+  $task_bin_exe = hdp_default("hadoop/health_check/task_bin_exe")
+}
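Most values above are resolved through hdp_default(key, fallback) and then read fully qualified by the other manifests in this module. A minimal, hypothetical sketch of consuming one of them (the class name and notify resource are illustrative only):

class example_show_replication {   # hypothetical, not part of this commit
  include hdp-hadoop::params
  # dfs_replication falls back to 3 when "hadoop/hdfs-site/dfs_replication" is not set
  notify { "dfs.replication = ${hdp-hadoop::params::dfs_replication}": }
}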

+ 118 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp

@@ -0,0 +1,118 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+define hdp-hadoop::service(
+  $ensure = 'running',
+  $user,
+  $initial_wait = undef,
+  $create_pid_dir = true,
+  $create_log_dir = true
+)
+{
+
+  $security_enabled = $hdp::params::security_enabled
+
+  #NOTE: does not work if the namenode and datanode are on the same host
+  $pid_dir = "${hdp-hadoop::params::hadoop_piddirprefix}/${user}"
+  
+  if (($security_enabled == true) and ($name == 'datanode')) {
+    $run_as_root = true
+  } else {       
+    $run_as_root = false
+  }
+
+  if (($security_enabled == true) and ($name == 'datanode')) {
+    $hdfs_user = $hdp::params::hdfs_user
+    $pid_file = "${hdp-hadoop::params::hadoop_piddirprefix}/${hdfs_user}/hadoop-${hdfs_user}-${name}.pid"
+  } else {
+    $pid_file = "${pid_dir}/hadoop-${user}-${name}.pid"
+  } 
+
+  $log_dir = "${hdp-hadoop::params::hadoop_logdirprefix}/${user}"
+  $hadoop_daemon = "${hdp::params::hadoop_bin}/hadoop-daemon.sh"
+   
+  $cmd = "${hadoop_daemon} --config ${hdp-hadoop::params::conf_dir}"
+  if ($ensure == 'running') {
+    if ($run_as_root == true) {
+      $daemon_cmd = "${cmd} start ${name}"
+    } else {
+      $daemon_cmd = "su - ${user} -c  '${cmd} start ${name}'"
+    }
+    $service_is_up = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
+  } elsif ($ensure == 'stopped') {
+    if ($run_as_root == true) {
+      $daemon_cmd = "${cmd} stop ${name}"
+    } else {
+      $daemon_cmd = "su - ${user} -c  '${cmd} stop ${name}'"
+    }
+    $service_is_up = undef
+  } else {
+    $daemon_cmd = undef
+  }
+ 
+  if ($create_pid_dir == true) {
+    hdp::directory_recursive_create { $pid_dir: 
+      owner       => $user,
+      context_tag => 'hadoop_service',
+      service_state => $service_state,
+      force => true
+    }
+  }
+  
+  if ($create_log_dir == true) {
+    hdp::directory_recursive_create { $log_dir: 
+      owner       => $user,
+      context_tag => 'hadoop_service',
+      service_state => $service_state,
+      force => true
+    }
+  }
+  if ($daemon_cmd != undef) {  
+    hdp::exec { $daemon_cmd:
+      command      => $daemon_cmd,
+      unless       => $service_is_up,
+      initial_wait => $initial_wait
+    }
+  }
+
+  anchor{"hdp-hadoop::service::${name}::begin":}
+  anchor{"hdp-hadoop::service::${name}::end":}
+  if ($daemon_cmd != undef) {
+    Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Exec[$daemon_cmd] -> Anchor["hdp-hadoop::service::${name}::end"]
+
+    if ($create_pid_dir == true) {
+      Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Directory_recursive_create[$pid_dir] -> Hdp::Exec[$daemon_cmd] 
+    }
+    if ($create_log_dir == true) {
+      Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Directory_recursive_create[$log_dir] -> Hdp::Exec[$daemon_cmd] 
+    }
+  }
+  if ($ensure == 'running') {
+    #TODO: look at Puppet resource retry and retry_sleep
+    #TODO: can make sleep contingent on $name
+    $sleep = 5
+    $post_check = "sleep ${sleep}; ${service_is_up}"
+    hdp::exec { $post_check:
+      command => $post_check,
+      unless  => $service_is_up
+    }
+    Hdp::Exec[$daemon_cmd] -> Hdp::Exec[$post_check] -> Anchor["hdp-hadoop::service::${name}::end"]
+  }  
+}
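A minimal usage sketch (hypothetical, mirroring the declarations in snamenode.pp and tasktracker.pp further down): start the datanode daemon as the hdfs user and let the define manage its pid and log directories.

hdp-hadoop::service { 'datanode':
  ensure         => 'running',
  user           => $hdp-hadoop::params::hdfs_user,
  create_pid_dir => true,
  create_log_dir => true
}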

+ 24 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp

@@ -0,0 +1,24 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::slave::jobtracker-conn($jobtracker_host)
+{
+  Hdp-Hadoop::Configfile<||>{jtnode_host => $jobtracker_host}
+}

+ 27 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp

@@ -0,0 +1,27 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::slave::master-conn($master_host)
+{
+  Hdp-Hadoop::Configfile<||>{
+    namenode_host => $master_host,
+    jtnode_host   => $master_host
+  }
+}

+ 27 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp

@@ -0,0 +1,27 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#TODO: this might be replaced by just using hdp::namenode-conn
+class hdp-hadoop::slave::namenode-conn($namenode_host)
+{
+  #TODO: check if we can get rid of both
+  Hdp-Hadoop::Configfile<||>{namenode_host => $namenode_host}
+  Hdp::Configfile<||>{namenode_host => $namenode_host} #for components other than hadoop (e.g., hbase) 
+}
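A hypothetical wiring sketch (the host name is illustrative only): a slave role declares this class so that every rendered config file, including those of non-hadoop components such as hbase, points at the right namenode.

class { 'hdp-hadoop::slave::namenode-conn':
  namenode_host => 'nn.example.com'   # illustrative host name
}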

+ 46 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp

@@ -0,0 +1,46 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::smoketest(
+  $opts={}
+)
+{
+  #TODO: put in wait
+  #TODO: look for better way to compute outname
+  $date_format = '"%M%d%y"'
+  $outname = inline_template("<%=  `date +${date_format}`.chomp %>")
+
+  #TODO: hardwired to run on namenode and to use user hdfs
+
+  $put = "dfs -put /etc/passwd passwd-${outname}"
+  $exec = "jar /usr/share/hadoop/hadoop-examples-*.jar wordcount passwd-${outname} ${outname}.out"
+  $result = "fs -test -e ${outname}.out >/dev/null 2>&1"
+  anchor{ "hdp-hadoop::smoketest::begin" :} ->
+  hdp-hadoop::exec-hadoop{ $put:
+    command => $put
+  } ->
+  hdp-hadoop::exec-hadoop{ $exec:
+    command =>  $exec
+  } ->
+  hdp-hadoop::exec-hadoop{ $result:
+    command =>  $result
+  } ->
+  anchor{ "hdp-hadoop::smoketest::end" :}
+}
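Declared on the namenode (as the TODO notes, the class is currently hardwired to run there), the three chained hdp-hadoop::exec-hadoop resources amount to roughly the hadoop CLI calls sketched in the comments below; this snippet is illustrative and not part of this commit.

# roughly equivalent, in order:
#   hadoop dfs -put /etc/passwd passwd-<outname>
#   hadoop jar /usr/share/hadoop/hadoop-examples-*.jar wordcount passwd-<outname> <outname>.out
#   hadoop fs -test -e <outname>.out
class { 'hdp-hadoop::smoketest': }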

+ 98 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp

@@ -0,0 +1,98 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::snamenode(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-hadoop::params  
+{
+  $hdp::params::service_exists['hdp-hadoop::snamenode'] = true
+
+  Hdp-hadoop::Common<||>{service_states +> $service_state}
+  Hdp-hadoop::Package<||>{include_64_bit => true}
+  Hdp-hadoop::Configfile<||>{sizes +> 64}
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
+    $fs_checkpoint_dir = $hdp-hadoop::params::fs_checkpoint_dir
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp-hadoop::initialize
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      if ($hdp::params::service_exists['hdp-hadoop::namenode'] != true) {
+        $masterHost = $kerberos_adminclient_host[0]
+        hdp::download_keytab { 'snamenode_service_keytab' :
+          masterhost => $masterHost,
+          keytabdst => "${$keytab_path}/nn.service.keytab",
+          keytabfile => 'nn.service.keytab',
+          owner => $hdp-hadoop::params::hdfs_user
+        }
+        hdp::download_keytab { 'snamenode_spnego_keytab' :   
+          masterhost => $masterHost,
+          keytabdst => "${$keytab_path}/spnego.service.keytab",
+          keytabfile => 'spnego.service.keytab', 
+          owner => $hdp-hadoop::params::hdfs_user,
+          mode => '0440',
+          group => 'hadoop'
+        }
+      }
+    }
+ 
+    Hdp-Hadoop::Configfile<||>{snamenode_host => $hdp::params::host_address}
+  
+    hdp-hadoop::snamenode::create_name_dirs { $fs_checkpoint_dir: 
+      service_state => $service_state
+    }
+    
+    if ($hdp::params::service_exists['hdp-hadoop::namenode'] == true) {
+      $create_pid_dir = false
+      $create_log_dir = false
+    } else {
+      $create_pid_dir = true
+      $create_log_dir = true
+    }
+    
+    hdp-hadoop::service{ 'secondarynamenode':
+      ensure         => $service_state,
+      user           => $hdp-hadoop::params::hdfs_user,
+      create_pid_dir => $create_pid_dir,
+      create_log_dir => $create_log_dir
+    }
+  
+    #top level does not need anchors
+    Class['hdp-hadoop'] -> Hdp-hadoop::Service['secondarynamenode']
+    Hdp-hadoop::Namenode::Create_name_dirs<||> -> Hdp-hadoop::Service['secondarynamenode']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp-hadoop::snamenode::create_name_dirs($service_state)
+{
+  $dirs = hdp_array_from_comma_list($name)
+  hdp::directory_recursive_create { $dirs:
+    owner => $hdp-hadoop::params::hdfs_user,
+    mode => '0755',
+    service_state => $service_state,
+    force => true
+  }
+}
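Because the define splits its title with hdp_array_from_comma_list, one declaration can create several checkpoint directories at once; a hypothetical sketch (the directory paths are illustrative only):

hdp-hadoop::snamenode::create_name_dirs { '/hadoop/hdfs/namesecondary,/data1/hdfs/namesecondary':
  service_state => 'running'
}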

+ 94 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp

@@ -0,0 +1,94 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::tasktracker(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-hadoop::params
+{
+  $hdp::params::service_exists['hdp-hadoop::tasktracker'] = true
+
+  Hdp-hadoop::Common<||>{service_states +> $service_state}
+
+  if ($hdp::params::use_32_bits_on_slaves == true) {
+    Hdp-hadoop::Package<||>{include_32_bit => true}
+    Hdp-hadoop::Configfile<||>{sizes +> 32}
+  } else {
+    Hdp-hadoop::Package<||>{include_64_bit => true}
+    Hdp-hadoop::Configfile<||>{sizes +> 64}
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
+    $mapred_local_dir = $hdp-hadoop::params::mapred_local_dir
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp-hadoop::initialize
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      $masterHost = $kerberos_adminclient_host[0]
+      hdp::download_keytab { 'tasktracker_service_keytab' :
+        masterhost => $masterHost,
+        keytabdst => "${$keytab_path}/tt.service.keytab",
+        keytabfile => 'tt.service.keytab',
+        owner => $hdp-hadoop::params::mapred_user
+      }
+    }
+  
+    hdp-hadoop::tasktracker::create_local_dirs { $mapred_local_dir: 
+      service_state => $service_state
+    }
+    
+    if ($hdp::params::service_exists['hdp-hadoop::jobtracker'] == true) {
+      $create_pid_dir = false
+      $create_log_dir = false
+    } else {
+      $create_pid_dir = true
+      $create_log_dir = true
+    }
+
+    hdp-hadoop::service{ 'tasktracker':
+      ensure => $service_state,
+      user   => $hdp-hadoop::params::mapred_user,
+      create_pid_dir => $create_pid_dir,
+      create_log_dir => $create_log_dir
+    }
+  
+    #top level does not need anchors
+    Class['hdp-hadoop'] -> Hdp-hadoop::Service['tasktracker']
+    Hdp-hadoop::Tasktracker::Create_local_dirs<||> -> Hdp-hadoop::Service['tasktracker']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp-hadoop::tasktracker::create_local_dirs($service_state)
+{
+  if ($hdp::params::service_exists['hdp-hadoop::jobtracker'] != true) {
+    $dirs = hdp_array_from_comma_list($name)
+    hdp::directory_recursive_create { $dirs :
+      owner => $hdp-hadoop::params::mapred_user,
+      mode => '0755',
+      service_state => $service_state,
+      force => true
+    }
+  }
+}

+ 68 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/capacity-scheduler.xml.erb

@@ -0,0 +1,68 @@
+<?xml version="1.0"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- This is the configuration file for the resource manager in Hadoop. -->
+<!-- You can configure various scheduling parameters related to queues. -->
+<!-- The properties for a queue follow a naming convention, such as, -->
+<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.capacity</name>
+    <value>100</value>
+    <description>Percentage of the number of slots in the cluster that are
+      guaranteed to be available for jobs in this queue.
+    </description>    
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
+    <value>false</value>
+    <description>If true, priorities of jobs will be taken into 
+      account in scheduling decisions.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
+    <value>100</value>
+    <description> Each queue enforces a limit on the percentage of resources 
+    allocated to a user at any given time, if there is competition for them. 
+    This user limit can vary between a minimum and maximum value. The former
+    depends on the number of users who have submitted jobs, and the latter is
+    set to this property value. For example, suppose the value of this 
+    property is 25. If two users have submitted jobs to a queue, no single 
+    user can use more than 50% of the queue resources. If a third user submits
+    a job, no single user can use more than 33% of the queue resources. With 4 
+    or more users, no user can use more than 25% of the queue's resources. A 
+    value of 100 implies no user limits are imposed. 
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-jobs-per-user</name>
+    <value>25</value>
+    <description>The maximum number of jobs to be pre-initialized for a user
+    of the job queue.
+    </description>
+  </property>
+
+</configuration>

+ 25 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb

@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

+ 254 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/core-site.xml.erb

@@ -0,0 +1,254 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+ 
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+  <property>
+    <name>io.file.buffer.size</name>
+    <value>131072</value>
+    <description>The size of buffer for use in sequence files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+  </property>
+
+  <property>
+    <name>io.serializations</name>
+    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+  </property>
+
+  <property>
+    <name>io.compression.codecs</name>
+    <value><%=scope.function_hdp_template_var("compression_codecs")%></value>
+    <description>A list of the compression codec classes that can be used
+                 for compression/decompression.</description>
+  </property>
+
+  <property>
+    <name>io.compression.codec.lzo.class</name>
+    <value>com.hadoop.compression.lzo.LzoCodec</value>
+    <description>The implementation for lzo codec.</description>
+  </property>
+
+<!-- file system properties -->
+
+  <property>
+    <name>fs.default.name</name>
+    <!-- cluster variant -->
+    <value>hdfs://<%=scope.function_hdp_host("namenode_host")%>:8020</value>
+    <description>The name of the default file system.  Either the
+  literal string "local" or a host:port for NDFS.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>fs.trash.interval</name>
+    <value>360</value>
+    <description>Number of minutes between trash checkpoints.
+  If zero, the trash feature is disabled.
+  </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.dir</name>
+    <value><%=scope.function_hdp_template_var("fs_checkpoint_dir")%></value>
+    <description>Determines where on the local filesystem the DFS secondary
+        name node should store the temporary images to merge.
+        If this is a comma-delimited list of directories then the image is
+        replicated in all of the directories for redundancy.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.edits.dir</name>
+    <value>${fs.checkpoint.dir}</value>
+    <description>Determines where on the local filesystem the DFS secondary
+        name node should store the temporary edits to merge.
+        If this is a comma-delimited list of directories then the edits are
+        replicated in all of the directories for redundancy.
+        Default value is same as fs.checkpoint.dir
+    </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.period</name>
+    <value>21600</value>
+    <description>The number of seconds between two periodic checkpoints.
+  </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.size</name>
+    <value>536870912</value>
+    <description>The size of the current edit log (in bytes) that triggers
+       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+  </description>
+  </property>
+
+  <!-- ipc properties: copied from kryptonite configuration -->
+  <property>
+    <name>ipc.client.idlethreshold</name>
+    <value>8000</value>
+    <description>Defines the threshold number of connections after which
+               connections will be inspected for idleness.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connection.maxidletime</name>
+    <value>30000</value>
+    <description>The maximum time after which a client will bring down the
+               connection to the server.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connect.max.retries</name>
+    <value>50</value>
+    <description>Defines the maximum number of retries for IPC connections.</description>
+  </property>
+
+  <!-- Web Interface Configuration -->
+  <property>
+    <name>webinterface.private.actions</name>
+    <value>false</value>
+    <description> If set to true, the web interfaces of JT and NN may contain
+                actions, such as kill job, delete file, etc., that should
+                not be exposed to public. Enable this option if the interfaces
+                are only reachable by those who have the right authorization.
+  </description>
+  </property>
+
+ <property>
+   <name>hadoop.security.authentication</name>
+   <value><%=scope.function_hdp_template_var("security_type")%></value>
+   <description>
+   Set the authentication for the cluster. Valid values are: simple or
+   kerberos.
+   </description>
+ </property>
+<property>
+  <name>hadoop.security.authorization</name>
+  <value><%=scope.function_hdp_template_var("enable_security_authorization")%></value>
+  <description>
+     Enable authorization for different protocols.
+  </description>
+</property>
+
+  <property>
+    <name>hadoop.security.auth_to_local</name>
+    <value>
+        RULE:[2:$1@$0]([jt]t@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("mapred_user")%>/
+        RULE:[2:$1@$0]([nd]n@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("hdfs_user")%>/
+        RULE:[2:$1@$0](hm@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("hbase_user")%>/
+        RULE:[2:$1@$0](rs@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("hbase_user")%>/
+        DEFAULT</value>
+<description>The mapping from kerberos principal names to local OS user names.
+  The default rule is just "DEFAULT", which takes all principals in your default domain to their first component:
+  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" both become "omalley", if your default domain is APACHE.ORG.
+The translations rules have 3 sections:
+      base     filter    substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
+RULE:[2:$1@$0](.*@ACME.ORG)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+    </description>
+  </property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("hive_user")%>.groups</name>
+  <value><%=scope.function_hdp_template_var("proxyuser_group")%></value>
+  <description>
+     Proxy group for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("hive_user")%>.hosts</name>
+  <value><%=scope.function_hdp_host("hive_server_host")%></value>
+  <description>
+     Proxy host for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("oozie_user")%>.groups</name>
+  <value><%=scope.function_hdp_template_var("proxyuser_group")%></value>
+  <description>
+     Proxy group for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("oozie_user")%>.hosts</name>
+  <value><%=scope.function_hdp_host("oozie_server")%></value>
+  <description>
+     Proxy host for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("templeton_user")%>.groups</name>
+  <value><%=scope.function_hdp_template_var("proxyuser_group")%></value>
+  <description>
+    Proxy group for templeton.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("templeton_user")%>.hosts</name>
+  <value><%=scope.function_hdp_host("templeton_server_host")%></value>
+  <description>
+    Proxy host for templeton.
+  </description>
+</property>
+</configuration>

+ 89 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb

@@ -0,0 +1,89 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME=<%=scope.function_hdp_java_home()%>
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-<%=scope.function_hdp_template_var("conf_dir")%>}
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms<%=scope.function_hdp_template_var("namenode_heapsize")%>"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms<%=scope.function_hdp_template_var("namenode_heapsize")%> -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("jtnode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("jtnode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx<%=scope.function_hdp_template_var("jtnode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("ttnode_heapsize")%> -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx<%=scope.function_hdp_template_var("dtnode_heapsize")%> -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("hadoop_heapsize")%> ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx128m ${HADOOP_CLIENT_OPTS}"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData ${HADOOP_JAVA_PLATFORM_OPTS}"
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=<%=scope.function_hdp_template_var("hdfs_user")%>
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER
+
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR=<%=scope.function_hdp_template_var("hadoop_piddirprefix")%>/$USER
+export HADOOP_SECURE_DN_PID_DIR=<%=scope.function_hdp_template_var("hadoop_piddirprefix")%>/$HADOOP_SECURE_DN_USER
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10

+ 37 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb

@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
+datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+jobtracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
+tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+

+ 37 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb

@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
+datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+jobtracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
+tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+

+ 134 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-policy.xml.erb

@@ -0,0 +1,134 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code
+    via the DistributedFileSystem.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.tracker.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
+    communicate with the jobtracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.submission.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for JobSubmissionProtocol, used by job clients to
+    communicate with the jobtracker for job submission, querying job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.task.umbilical.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+ <property>
+    <name>security.admin.operations.protocol.acl</name>
+    <value> <%=scope.function_hdp_template_var("hdfs_user")%></value>
+    <description>ACL for AdminOperationsProtocol. Used for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
+    <value> <%=scope.function_hdp_template_var("hdfs_user")%></value>
+    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+    users mappings. The ACL is a comma-separated list of user and
+    group names. The user and group list is separated by a blank. For
+    e.g. "alice,bob users,wheel".  A special value of "*" means all
+    users are allowed.</description>
+  </property>
+
+<property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value> <%=scope.function_hdp_template_var("hdfs_user")%></value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in-effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+
+</configuration>

+ 118 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb

@@ -0,0 +1,118 @@
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+function check_taskcontroller {
+  if [ "<%=scope.function_hdp_template_var("security_enabled")%>" == "true" ]; then
+    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
+    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
+      echo "taskcontroller ok"
+    else
+      echo 'check taskcontroller' ; exit 1
+    fi
+  fi
+}
+
+function check_jetty {
+  hname=`hostname`
+  jmx=`curl -s -S -m 5 "http://$hname:50060/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
+  if [ $? -eq 0 ] ; then
+    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
+    e=${e:-0} # no jmx servlet ?
+    if [ $e -gt 10 ] ; then
+      echo "check jetty: shuffle_exceptions=$e" ; exit 1
+    else
+      echo "jetty ok"
+    fi
+  else
+    echo "check jetty: ping failed" ; exit 1
+  fi
+}
+
+function check_link {
+  snmp=/usr/bin/snmpwalk
+  if [ -e $snmp ] ; then
+    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
+    awk ' {
+      split($1,a,".") ;
+      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
+      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
+      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
+      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
+      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
+      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
+    }
+    END {
+      up=0;
+      for (i in ifIndex ) {
+      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
+      up=i;
+      }
+      }
+      if ( up == 0 ) { print "check link" ; exit 2 }
+      else { print ifDescr[up],"ok" }
+    }'
+    exit $? ;
+  fi
+}
+
+# Run all checks
+# Disabled 'check_link' for now... 
+for check in disks taskcontroller jetty; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0

+ 171 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb

@@ -0,0 +1,171 @@
+# Copyright 2011 The Apache Software Foundation
+# 
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
+log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false

+ 39 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-queue-acls.xml.erb

@@ -0,0 +1,39 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- mapred-queue-acls.xml -->
+<configuration>
+
+
+<!-- queue default -->
+
+  <property>
+    <name>mapred.queue.default.acl-submit-job</name>
+    <value>*</value>
+  </property>
+
+  <property>
+    <name>mapred.queue.default.acl-administer-jobs</name>
+    <value>*</value>
+  </property>
+
+  <!-- END ACLs -->
+
+</configuration>

+ 531 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-site.xml.erb

@@ -0,0 +1,531 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+  <property>
+    <name>io.sort.mb</name>
+    <value><%=scope.function_hdp_template_var("io_sort_mb")%></value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>io.sort.record.percent</name>
+    <value>.2</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>io.sort.spill.percent</name>
+    <value><%=scope.function_hdp_template_var("io_sort_spill_percent")%></value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>io.sort.factor</name>
+    <value>100</value>
+    <description>No description</description>
+  </property>
+
+<!-- map/reduce properties -->
+
+<property>
+  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
+  <value>250</value>
+  <description>Normally, this is the amount of time before killing
+  processes, and the recommended default is 5 seconds - a value of
+  5000 (milliseconds) here.  In this case, we are using it solely to blast
+  tasks before killing them, and killing them very quickly (1/4 second) to
+  guarantee that we do not leave VMs around for later jobs.
+  </description>
+</property>
+
+  <property>
+    <name>mapred.job.tracker.handler.count</name>
+    <value>50</value>
+    <description>
+    The number of server threads for the JobTracker. This should be roughly
+    4% of the number of tasktracker nodes.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.system.dir</name>
+    <value><%=scope.function_hdp_template_var("mapred_system_dir")%></value>
+    <description>No description</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker</name>
+    <!-- cluster variant -->
+    <value><%=scope.function_hdp_host("jtnode_host")%>:50300</value>
+    <description>No description</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.http.address</name>
+    <!-- cluster variant -->
+    <value><%=scope.function_hdp_host("jtnode_host")%>:50030</value>
+    <description>No description</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <!-- cluster specific -->
+    <name>mapred.local.dir</name>
+    <value><%=scope.function_hdp_template_var("mapred_local_dir")%></value>
+    <description>No description</description>
+    <final>true</final>
+  </property>
+
+  <property>
+  <name>mapreduce.cluster.administrators</name>
+  <value> hadoop</value>
+  </property>
+
+  <property>
+    <name>mapred.reduce.parallel.copies</name>
+    <value>30</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.tasktracker.map.tasks.maximum</name>
+    <value><%=scope.function_hdp_template_var("mapred_map_tasks_max")%></value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.tasktracker.reduce.tasks.maximum</name>
+    <value><%=scope.function_hdp_template_var("mapred_red_tasks_max")%></value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>tasktracker.http.threads</name>
+    <value>50</value>
+  </property>
+
+  <property>
+    <name>mapred.map.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>If true, then multiple instances of some map tasks
+               may be executed in parallel.</description>
+  </property>
+
+  <property>
+    <name>mapred.reduce.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>If true, then multiple instances of some reduce tasks
+               may be executed in parallel.</description>
+  </property>
+
+  <property>
+    <name>mapred.reduce.slowstart.completed.maps</name>
+    <value>0.05</value>
+  </property>
+
+  <property>
+    <name>mapred.inmem.merge.threshold</name>
+    <value>1000</value>
+    <description>The threshold, in terms of the number of files,
+  for the in-memory merge process. When we accumulate the threshold number of files
+  we initiate the in-memory merge and spill to disk. A value of 0 or less
+  indicates that there is no threshold, and we instead depend only on
+  the ramfs's memory consumption to trigger the merge.
+  </description>
+  </property>
+
+  <property>
+    <name>mapred.job.shuffle.merge.percent</name>
+    <value>0.66</value>
+    <description>The usage threshold at which an in-memory merge will be
+  initiated, expressed as a percentage of the total memory allocated to
+  storing in-memory map outputs, as defined by
+  mapred.job.shuffle.input.buffer.percent.
+  </description>
+  </property>
+
+  <property>
+    <name>mapred.job.shuffle.input.buffer.percent</name>
+    <value>0.7</value>
+    <description>The percentage of memory to be allocated from the maximum heap
+  size to storing map outputs during the shuffle.
+  </description>
+  </property>
+
+  <property>
+    <name>mapred.map.output.compression.codec</name>
+    <value><%=scope.function_hdp_template_var("mapred_map_output_compression_codec")%></value>
+    <description>If the map outputs are compressed, how should they be
+      compressed
+    </description>
+  </property>
+
+<property>
+  <name>mapred.output.compression.type</name>
+  <value>BLOCK</value>
+  <description>If the job outputs are to be compressed as SequenceFiles, how should
+               they be compressed? Should be one of NONE, RECORD or BLOCK.
+  </description>
+</property>
+
+
+  <property>
+    <name>mapred.jobtracker.completeuserjobs.maximum</name>
+    <value>0</value>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.taskScheduler</name>
+    <value><%=scope.function_hdp_template_var("scheduler_name")%></value>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.restart.recover</name>
+    <value>false</value>
+    <description>"true" to enable (job) recovery upon restart,
+               "false" to start afresh
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.reduce.input.buffer.percent</name>
+    <value>0.0</value>
+    <description>The percentage of memory- relative to the maximum heap size- to
+  retain map outputs during the reduce. When the shuffle is concluded, any
+  remaining map outputs in memory must consume less than this threshold before
+  the reduce can begin.
+  </description>
+  </property>
+
+ <property>
+  <name>mapreduce.reduce.input.limit</name>
+  <value>10737418240</value>
+  <description>The limit on the input size of the reduce. (This value
+  is 10 GB.)  If the estimated input size of the reduce is greater than
+  this value, the job is failed. A value of -1 means that there is no limit
+  set. </description>
+</property>
+
+
+  <!-- copied from kryptonite configuration -->
+  <property>
+    <name>mapred.compress.map.output</name>
+    <value><%=scope.function_hdp_template_var("mapred_compress_map_output")%></value>
+  </property>
+
+
+  <property>
+    <name>mapred.task.timeout</name>
+    <value>600000</value>
+    <description>The number of milliseconds before a task will be
+  terminated if it neither reads an input, writes an output, nor
+  updates its status string.
+  </description>
+  </property>
+
+  <property>
+    <name>jetty.connector</name>
+    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.task.tracker.task-controller</name>
+    <value><%=scope.function_hdp_template_var("task_controller")%></value>
+   <description>
+     TaskController which is used to launch and manage task execution.
+  </description>
+  </property>
+
+  <property>
+    <name>mapred.child.root.logger</name>
+    <value>INFO,TLA</value>
+  </property>
+
+  <property>
+    <name>mapred.child.java.opts</name>
+    <value>-server <%=scope.function_hdp_template_var("mapred_child_java_opts_sz")%> -Djava.net.preferIPv4Stack=true</value>
+
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.cluster.map.memory.mb</name>
+    <value><%=scope.function_hdp_template_var("mapred_cluster_map_mem_mb")%></value>
+  </property>
+
+  <property>
+    <name>mapred.cluster.reduce.memory.mb</name>
+    <value><%=scope.function_hdp_template_var("mapred_cluster_red_mem_mb")%></value>
+  </property>
+
+  <property>
+    <name>mapred.job.map.memory.mb</name>
+    <value><%=scope.function_hdp_template_var("mapred_job_map_mem_mb")%></value>
+  </property>
+
+  <property>
+    <name>mapred.job.reduce.memory.mb</name>
+    <value><%=scope.function_hdp_template_var("mapred_job_red_mem_mb")%></value>
+  </property>
+
+  <property>
+    <name>mapred.cluster.max.map.memory.mb</name>
+    <value><%=scope.function_hdp_template_var("mapred_cluster_max_map_mem_mb")%></value>
+  </property>
+
+  <property>
+    <name>mapred.cluster.max.reduce.memory.mb</name>
+    <value><%=scope.function_hdp_template_var("mapred_cluster_max_red_mem_mb")%></value>
+  </property>
+
+<property>
+  <name>mapred.hosts</name>
+  <value><%=scope.function_hdp_template_var("conf_dir")%>/<%=scope.function_hdp_template_var("mapred_hosts_include")%></value>
+</property>
+
+<property>
+  <name>mapred.hosts.exclude</name>
+  <value><%=scope.function_hdp_template_var("conf_dir")%>/<%=scope.function_hdp_template_var("mapred_hosts_exclude")%></value>
+</property>
+
+<property>
+  <name>mapred.max.tracker.blacklists</name>
+  <value>16</value>
+  <description>
+    If a node is reported blacklisted by 16 successful jobs within the timeout window, it will be graylisted
+  </description>
+</property>
+
+<property>
+  <name>mapred.healthChecker.script.path</name>
+  <value><%=scope.function_hdp_template_var("conf_dir")%>/health_check</value>
+</property>
+
+<property>
+  <name>mapred.healthChecker.interval</name>
+  <value>135000</value>
+</property>
+
+<property>
+  <name>mapred.healthChecker.script.timeout</name>
+  <value>60000</value>
+</property>
+
+<property>
+  <name>mapred.job.tracker.persist.jobstatus.active</name>
+  <value>false</value>
+  <description>Indicates if persistency of job status information is
+  active or not.
+  </description>
+</property>
+
+<property>
+  <name>mapred.job.tracker.persist.jobstatus.hours</name>
+  <value>1</value>
+  <description>The number of hours job status information is persisted in DFS.
+    The job status information will be available after it drops off the memory
+    queue and between jobtracker restarts. With a zero value the job status
+    information is not persisted at all in DFS.
+  </description>
+</property>
+
+<property>
+  <name>mapred.job.tracker.persist.jobstatus.dir</name>
+  <value><%=scope.function_hdp_template_var("mapred_jobstatus_dir")%></value>
+  <description>The directory where the job status information is persisted
+   in a file system to be available after it drops off the memory queue and
+   between jobtracker restarts.
+  </description>
+</property>
+
+<property>
+  <name>mapred.jobtracker.retirejob.check</name>
+  <value>10000</value>
+</property>
+
+<property>
+  <name>mapred.jobtracker.retirejob.interval</name>
+  <value>0</value>
+</property>
+
+<property>
+  <name>mapred.job.tracker.history.completed.location</name>
+  <value>/mapred/history/done</value>
+  <description>No description</description>
+</property>
+
+<property>
+  <name>mapred.task.maxvmem</name>
+  <value></value>
+  <final>true</final>
+   <description>No description</description>
+</property>
+
+<property>
+  <name>mapred.jobtracker.maxtasks.per.job</name>
+  <value><%=scope.function_hdp_template_var("maxtasks_per_job")%></value>
+  <final>true</final>
+  <description>The maximum number of tasks for a single job.
+  A value of -1 indicates that there is no maximum.  </description>
+</property>
+
+<property>
+  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
+  <value>false</value>
+</property>
+
+<property>
+  <name>mapred.userlog.retain.hours</name>
+  <value><%=scope.function_hdp_template_var("mapreduce_userlog_retainhours")%></value>
+</property>
+
+<property>
+  <name>mapred.job.reuse.jvm.num.tasks</name>
+  <value>1</value>
+  <description>
+    How many tasks to run per jvm. If set to -1, there is no limit
+  </description>
+  <final>true</final>
+</property>
+
+<property>
+  <name>mapreduce.jobtracker.kerberos.principal</name>
+  <value>jt/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
+  <description>
+      JT user name key.
+ </description>
+</property>
+
+<property>
+  <name>mapreduce.tasktracker.kerberos.principal</name>
+   <value>tt/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
+  <description>
+       tt user name key. "_HOST" is replaced by the host name of the task tracker.
+   </description>
+</property>
+
+
+  <property>
+    <name>hadoop.job.history.user.location</name>
+    <value>none</value>
+    <final>true</final>
+  </property>
+
+
+ <property>
+   <name>mapreduce.jobtracker.keytab.file</name>
+   <value><%=scope.function_hdp_template_var("keytab_path")%>/jt.service.keytab</value>
+   <description>
+       The keytab for the jobtracker principal.
+   </description>
+
+</property>
+
+ <property>
+   <name>mapreduce.tasktracker.keytab.file</name>
+   <value><%=scope.function_hdp_template_var("keytab_path")%>/tt.service.keytab</value>
+    <description>The filename of the keytab for the task tracker</description>
+ </property>
+
+ <property>
+   <name>mapreduce.jobtracker.staging.root.dir</name>
+   <value>/user</value>
+ <description>The Path prefix for where the staging directories should be placed. The next level is always the user's
+   name. It is a path in the default file system.</description>
+ </property>
+
+ <property>
+      <name>mapreduce.tasktracker.group</name>
+      <value>hadoop</value>
+      <description>The group that is allowed to access the task-controller binary. The mapred user must be a member, and ordinary users should *not* be members.</description>
+
+ </property>
+
+  <property>
+    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
+    <value>50000000</value>
+    <final>true</final>
+     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
+    initialization.
+   </description>
+  </property>
+  <property>
+    <name>mapreduce.history.server.embedded</name>
+    <value>false</value>
+    <description>Should the job history server be embedded within the JobTracker
+process</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapreduce.history.server.http.address</name>
+    <!-- cluster variant -->
+    <value><%=scope.function_hdp_host("jtnode_host")%>:51111</value>
+    <description>Http address of the history server</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.kerberos.principal</name>
+    <!-- cluster variant -->
+  <value>jt/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
+    <description>Job history user name key. (Must map to the same user as the JT
+user.)</description>
+  </property>
+
+ <property>
+   <name>mapreduce.jobhistory.keytab.file</name>
+    <!-- cluster variant -->
+   <value><%=scope.function_hdp_template_var("keytab_path")%>/jt.service.keytab</value>
+   <description>The keytab for the job history server principal.</description>
+ </property>
+
+<property>
+  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
+  <value>180</value>
+  <description>
+    3-hour sliding window (value is in minutes)
+  </description>
+</property>
+
+<property>
+  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
+  <value>15</value>
+  <description>
+    15-minute bucket size (value is in minutes)
+  </description>
+</property>
+
+<property>
+  <name>mapred.queue.names</name>
+  <value>default</value>
+  <description> Comma separated list of queues configured for this jobtracker.</description>
+</property>
+
+</configuration>

+ 3 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb

@@ -0,0 +1,3 @@
+<%h=scope.function_hdp_host("slave_hosts"); (h.kind_of?(Array) ? h : []).each do |host|-%>
+<%= host %>
+<%end-%>

+ 20 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb

@@ -0,0 +1,20 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+mapred.local.dir=<%=scope.function_hdp_template_var("mapred_local_dir")%>
+mapreduce.tasktracker.group=hadoop
+hadoop.log.dir=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/<%=scope.function_hdp_template_var("mapred_user")%>

+ 65 - 0
ambari-agent/src/main/python/ambari_agent/security.py

@@ -0,0 +1,65 @@
+import httplib
+import urllib2
+from urllib2 import Request
+import socket
+import ssl
+import os
+import logging
+from subprocess import Popen, PIPE
+import AmbariConfig
+
+logger = logging.getLogger()
+
+GEN_AGENT_KEY="openssl req -new -newkey rsa:1024 -nodes -keyout %(keysdir)s/%(hostname)s.key\
+	-subj /OU=%(hostname)s/\
+        -out %(keysdir)s/%(hostname)s.csr"
+
+class CertificateManager():
+    def __init__(self, config):
+        self.config = config
+        self.keysdir = self.config.get('security', 'keysdir')
+        self.server_crt=self.config.get('security', 'server_crt')
+    def getAgentKeyName(self):
+        return self.keysdir + os.sep + socket.gethostname() + ".key"
+    def getAgentCrtName(self):
+        return self.keysdir + os.sep + socket.gethostname() + ".key"
+    def getSrvrCrtName(self):
+        return self.keysdir + os.sep + "ca.crt"
+        
+    def checkCertExists(self):
+        
+        server_crt_exists = os.path.exists(self.getSrvrCrtName())
+        
+        if not server_crt_exists:
+            logger.info("Server certicate not exists, downloading")
+            self.loadSrvrCrt()
+        else:
+            logger.info("Server certicate exists, ok")
+            
+        agent_crt_exists = os.path.exists(self.getAgentCrtName())
+        
+        logger.info(self.getAgentCrtName())
+        
+        if not agent_crt_exists:
+            logger.info("Agent certicate not exists, generating request")
+            self.genAgentCrtReq()
+        else:
+            logger.info("Agent certicate exists, ok")
+            
+        
+    def loadSrvrCrt(self):
+      get_ca_url = self.config.get('server', 'url') + '/cert/ca/'
+      stream = urllib2.urlopen(get_ca_url)
+      response = stream.read()
+      stream.close()
+      srvr_crt_f = open(self.getSrvrCrtName(), 'w+')
+      srvr_crt_f.write(response)
+      srvr_crt_f.close()
+      
+    def genAgentCrtReq(self):
+        generate_script = GEN_AGENT_KEY % {'hostname': socket.gethostname(),
+                                           'keysdir' : self.config.get('security', 'keysdir')}
+        logger.info(generate_script)
+        pp = Popen([generate_script], shell=True, stdout=PIPE)
+
+    def initSecurity(self):
+        self.checkCertExists()

+ 40 - 0
ambari-project/pom.xml

@@ -109,6 +109,46 @@
         <artifactId>derby</artifactId>
         <version>10.9.1.0</version>
       </dependency>
+      <dependency>
+        <groupId>org.springframework.security</groupId>
+        <artifactId>spring-security-core</artifactId>
+        <version>3.1.2.RELEASE</version>
+      </dependency>
+      <dependency>
+        <groupId>org.springframework.security</groupId>
+        <artifactId>spring-security-config</artifactId>
+        <version>3.1.2.RELEASE</version>
+      </dependency>
+      <dependency>
+        <groupId>org.springframework.security</groupId>
+        <artifactId>spring-security-web</artifactId>
+        <version>3.1.2.RELEASE</version>
+      </dependency>
+      <dependency>
+        <groupId>org.springframework.security</groupId>
+        <artifactId>spring-security-ldap</artifactId>
+        <version>3.1.2.RELEASE</version>
+      </dependency>
+      <dependency>
+        <groupId>org.springframework.ldap</groupId>
+        <artifactId>spring-ldap-core</artifactId>
+        <version>1.3.1.RELEASE</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.directory.server</groupId>
+        <artifactId>apacheds-all</artifactId>
+        <version>1.5.5</version>
+      </dependency>
+      <dependency>
+        <groupId>org.slf4j</groupId>
+        <artifactId>slf4j-api</artifactId>
+        <version>1.6.6</version>
+      </dependency>
+      <dependency>
+        <groupId>org.slf4j</groupId>
+        <artifactId>slf4j-log4j12</artifactId>
+        <version>1.0.1</version>
+      </dependency>
       <dependency>
         <groupId>org.eclipse.persistence</groupId>
         <artifactId>eclipselink</artifactId>

+ 40 - 0
ambari-server/pom.xml

@@ -76,6 +76,46 @@
       <groupId>org.apache.derby</groupId>
       <artifactId>derby</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.springframework.security</groupId>
+      <artifactId>spring-security-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.security</groupId>
+      <artifactId>spring-security-config</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.security</groupId>
+      <artifactId>spring-security-web</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.security</groupId>
+      <artifactId>spring-security-ldap</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.ldap</groupId>
+      <artifactId>spring-ldap-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.directory.server</groupId>
+      <artifactId>apacheds-all</artifactId>
+    </dependency>
+    <!--<dependency>-->
+      <!--<groupId>org.apache.directory.shared</groupId>-->
+      <!--<artifactId>shared-ldap</artifactId>-->
+    <!--</dependency>-->
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+    </dependency>
+    <dependency>
+    <groupId>log4j</groupId>
+    <artifactId>log4j</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.eclipse.persistence</groupId>
       <artifactId>eclipselink</artifactId>

+ 6 - 0
ambari-server/src/main/assemblies/server.xml

@@ -28,6 +28,12 @@
       <source>${project.build.directory}/${artifact.artifactId}-${artifact.version}.jar</source>
       <outputDirectory>ambari-server-${project.version}/lib</outputDirectory>
     </file>
+
+    <file>
+      <source>${basedir}/src/main/resources/pass.txt</source>
+      <outputDirectory>/ambari-server-${project.version}</outputDirectory>
+    </file>
+
   </files>
   <fileSets>
     <!-- Distro files, readme, licenses, etc -->

+ 68 - 4
ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java

@@ -21,26 +21,57 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Properties;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
+import com.google.inject.Singleton;
+
 
 /**
  * Ambari configuration.
  * Reads properties from ambari.properties
  */
+@Singleton
 public class Configuration {
+
   private static final String AMBARI_CONF_VAR = "AMBARI_CONF_DIR";
   private static final String CONFIG_FILE = "ambari.properties";
   public static final String BOOTSTRAP_DIR = "bootstrap.dir";
   public static final String BOOTSTRAP_SCRIPT = "bootstrap.script";
+  public static final String SRVR_KSTR_DIR_KEY = "security.server.keys_dir";
+  public static final String SRVR_CRT_NAME_KEY = "security.server.cert_name";
+  public static final String SRVR_KEY_NAME_KEY = "security.server.key_name";
+  public static final String KSTR_NAME_KEY = "security.server.keystore_name";
+  public static final String SRVR_CRT_PASS_FILE_KEY = "security.server.crt_pass_file";
+  public static final String SRVR_CRT_PASS_KEY = "security.server.crt_pass";
+  public static final String CLIENT_SECURITY_KEY = "client.security";
+  private static final String SRVR_KSTR_DIR_DEFAULT = ".";
+  private static final String SRVR_CRT_NAME_DEFAULT = "ca.crt";
+  private static final String SRVR_KEY_NAME_DEFAULT = "ca.key";
+  private static final String KSTR_NAME_DEFAULT = "keystore.p12";
+  private static final String SRVR_CRT_PASS_FILE_DEFAULT ="pass.txt";
+  private static final String CLIENT_SECURITY_DEFAULT = "local";
+
 
+
+  
   private static final Log LOG = LogFactory.getLog(Configuration.class);
 
+  private static Configuration instance;
+
   private Properties properties;
-  
+
+
+  private Map<String, String> configsMap;
+
+
   Configuration() {
     this(readConfigFile());
   }
@@ -52,6 +83,29 @@ public class Configuration {
    */
   public Configuration(Properties properties) {
     this.properties = properties;
+
+    configsMap = new HashMap<String, String>();
+    configsMap.put(SRVR_KSTR_DIR_KEY, properties.getProperty(SRVR_KSTR_DIR_KEY, SRVR_KSTR_DIR_DEFAULT));
+    configsMap.put(SRVR_CRT_NAME_KEY, properties.getProperty(SRVR_CRT_NAME_KEY, SRVR_CRT_NAME_DEFAULT));
+    configsMap.put(SRVR_KEY_NAME_KEY, properties.getProperty(SRVR_KEY_NAME_KEY, SRVR_KEY_NAME_DEFAULT));
+    configsMap.put(KSTR_NAME_KEY, properties.getProperty(KSTR_NAME_KEY, KSTR_NAME_DEFAULT));
+    configsMap.put(SRVR_CRT_PASS_FILE_KEY, properties.getProperty(SRVR_CRT_PASS_FILE_KEY, SRVR_CRT_PASS_FILE_DEFAULT));
+    configsMap.put(CLIENT_SECURITY_KEY, properties.getProperty(CLIENT_SECURITY_KEY, CLIENT_SECURITY_DEFAULT));
+
+    try {
+      File passFile = new File(configsMap.get(SRVR_KSTR_DIR_KEY) + File.separator
+          + configsMap.get(SRVR_CRT_PASS_FILE_KEY));
+      if (passFile.exists()) {
+        String srvrCrtPass = FileUtils.readFileToString(passFile);
+        configsMap.put(SRVR_CRT_PASS_KEY, srvrCrtPass.trim());
+      } else {
+        LOG.info("Password file not found at " + passFile);
+      }
+    } catch (IOException e) {
+      throw new RuntimeException("Error reading certificate password from file", e);
+    }
   }
 
   /**
@@ -75,11 +129,11 @@ public class Configuration {
       LOG.info("No configuration file " + filename + " found.", fnf);
     } catch (IOException ie) {
       throw new IllegalArgumentException("Can't read configuration file " +
-                                         filename, ie);
+          filename, ie);
     }
     return properties;
   }
-  
+
   public File getBootStrapDir() {
     String fileName = properties.getProperty(BOOTSTRAP_DIR);
     if (fileName == null) {
@@ -87,7 +141,7 @@ public class Configuration {
     }
     return new File(fileName);
   }
-  
+
   public String getBootStrapScript() {
     String bootscript = properties.getProperty(BOOTSTRAP_SCRIPT);
     if (bootscript == null) {
@@ -95,4 +149,14 @@ public class Configuration {
     }
     return bootscript;
   }
+  
+  /**
+   * Get the map with server config parameters.
+   * Keys - public constants of this class
+   * @return the map with server config parameters
+   */
+  public Map<String, String> getConfigsMap() {
+    return configsMap;
+  }
+
 }
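
A minimal sketch of how the new config map can be consumed, mirroring the keys AmbariServer reads when it sets up its SSL connector (the class name and the empty-Properties bootstrap below are assumptions for illustration, not code from this patch):

import java.io.File;
import java.util.Map;
import java.util.Properties;

import org.apache.ambari.server.configuration.Configuration;

public class KeystoreLocator {
  public static void main(String[] args) {
    // An empty Properties object means every key falls back to its *_DEFAULT value.
    Configuration config = new Configuration(new Properties());
    Map<String, String> configs = config.getConfigsMap();

    // Same composition AmbariServer uses for the SslSocketConnector keystore.
    String keystore = configs.get(Configuration.SRVR_KSTR_DIR_KEY)
        + File.separator + configs.get(Configuration.KSTR_NAME_KEY);
    // Only present when pass.txt was found in the keystore directory.
    String password = configs.get(Configuration.SRVR_CRT_PASS_KEY);

    System.out.println("keystore=" + keystore + ", password set=" + (password != null));
  }
}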

+ 154 - 71
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java

@@ -19,85 +19,168 @@
 package org.apache.ambari.server.controller;
 
 
-import java.io.IOException;
-
-import org.apache.ambari.server.agent.HeartBeatHandler;
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.Singleton;
+import com.google.inject.persist.jpa.JpaPersistModule;
+import com.sun.jersey.spi.container.servlet.ServletContainer;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.security.CertificateManager;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.mortbay.jetty.Server;
+import org.mortbay.jetty.security.SslSocketConnector;
 import org.mortbay.jetty.servlet.Context;
 import org.mortbay.jetty.servlet.DefaultServlet;
 import org.mortbay.jetty.servlet.ServletHolder;
+import org.mortbay.jetty.webapp.WebAppContext;
+import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
+import org.springframework.context.support.ClassPathXmlApplicationContext;
+import org.springframework.web.context.WebApplicationContext;
+import org.springframework.web.context.support.GenericWebApplicationContext;
 
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Singleton;
-import com.sun.jersey.spi.container.servlet.ServletContainer;
+import java.io.File;
+import java.io.IOException;
+import java.net.URL;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
 
 @Singleton
 public class AmbariServer {
- private static Log LOG = LogFactory.getLog(AmbariServer.class);
- public static int CLIENT_PORT = 4080;
- private Server server = null;
- public volatile boolean running = true; // true while controller runs
-
- public void run() {
-   server = new Server(CLIENT_PORT);
-
-   try {
-     Context root = new Context(server, "/", Context.SESSIONS);
-     ServletHolder rootServlet = root.addServlet(DefaultServlet.class, "/");
-     rootServlet.setInitOrder(1);
-
-     ServletHolder sh = new ServletHolder(ServletContainer.class);
-     sh.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
-       "com.sun.jersey.api.core.PackagesResourceConfig");
-     sh.setInitParameter("com.sun.jersey.config.property.packages",
-       "org.apache.ambari.server.api.rest");
-     root.addServlet(sh, "/api/*");
-     sh.setInitOrder(2);
-
-     ServletHolder agent = new ServletHolder(ServletContainer.class);
-     agent.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
-       "com.sun.jersey.api.core.PackagesResourceConfig");
-     agent.setInitParameter("com.sun.jersey.config.property.packages",
-       "org.apache.ambari.server.agent.rest");
-     root.addServlet(agent, "/agent/*");
-     agent.setInitOrder(3);
-
-     server.setStopAtShutdown(true);
-
-     /*
-      * Start the server after controller state is recovered.
-      */
-     server.start();
-     LOG.info("Started Server");
-     server.join();
-     LOG.info("Joined the Server");
-   } catch (Exception e) {
-     LOG.error("Error in the server", e);
-
-   }
- }
-
- public void stop() throws Exception {
-   try {
-     server.stop();
-   } catch (Exception e) {
-     LOG.error("Error stopping the server", e);
-   }
- }
-
- public static void main(String[] args) throws IOException {
-   Injector injector = Guice.createInjector(new ControllerModule());
-   try {
-     LOG.info("Getting the controller");
-     AmbariServer server = injector.getInstance(AmbariServer.class);
-     if (server != null) {
-       server.run();
-     }
-   } catch(Throwable t) {
-     LOG.error("Failed to run the Ambari Server", t);
-   }
- }
+  public static final String PERSISTENCE_PROVIDER = "ambari-postgres";
+  private static Log LOG = LogFactory.getLog(AmbariServer.class);
+  public static int CLIENT_PORT = 4080;
+  public static int CLIENT_SECURED_PORT = 8443;
+  private Server server = null;
+  public volatile boolean running = true; // true while controller runs
+
+  final String WEB_APP_DIR = "webapp";
+  final URL warUrl = this.getClass().getClassLoader().getResource(WEB_APP_DIR);
+  final String warUrlString = warUrl.toExternalForm();
+  final String CONTEXT_PATH = "/";
+  final String SPRING_CONTEXT_LOCATION = "classpath:/webapp/WEB-INF/spring-security.xml";
+
+  @Inject
+  Configuration configs;
+  @Inject
+  CertificateManager certMan;
+  @Inject
+  Injector injector;
+
+  public void run() {
+    server = new Server(CLIENT_PORT);
+
+    try {
+      ClassPathXmlApplicationContext parentSpringAppContext = new ClassPathXmlApplicationContext();
+      parentSpringAppContext.refresh();
+      ConfigurableListableBeanFactory factory = parentSpringAppContext.getBeanFactory();
+      factory.registerSingleton("guiceInjector", injector); //Spring Security xml config depends on this Bean
+
+      String[] contextLocations = {SPRING_CONTEXT_LOCATION};
+      ClassPathXmlApplicationContext springAppContext = new ClassPathXmlApplicationContext(contextLocations, parentSpringAppContext);
+
+      WebAppContext webAppContext = new WebAppContext(warUrlString, CONTEXT_PATH);
+
+      GenericWebApplicationContext springWebAppContext = new GenericWebApplicationContext();
+      springWebAppContext.setServletContext(webAppContext.getServletContext());
+      springWebAppContext.setParent(springAppContext);
+
+      webAppContext.getServletContext().setAttribute(WebApplicationContext.ROOT_WEB_APPLICATION_CONTEXT_ATTRIBUTE, springWebAppContext);
+
+      server.setHandler(webAppContext);
+
+      certMan.initRootCert();
+      Context root =
+//              new Context(webAppContext, "/", Context.SESSIONS);
+              webAppContext;
+
+      ServletHolder rootServlet = root.addServlet(DefaultServlet.class, "/");
+      rootServlet.setInitOrder(1);
+
+
+      //Secured connector for 2-way auth
+      SslSocketConnector sslConnector = new SslSocketConnector();
+      sslConnector.setPort(CLIENT_SECURED_PORT);
+
+      Map<String, String> configsMap = configs.getConfigsMap();
+      String keystore = configsMap.get(Configuration.SRVR_KSTR_DIR_KEY) + File.separator + configsMap.get(Configuration.KSTR_NAME_KEY);
+      String srvrCrtPass = configsMap.get(Configuration.SRVR_CRT_PASS_KEY);
+
+      sslConnector.setKeystore(keystore);
+      sslConnector.setTruststore(keystore);
+      sslConnector.setPassword(srvrCrtPass);
+      sslConnector.setKeyPassword(srvrCrtPass);
+      sslConnector.setTrustPassword(srvrCrtPass);
+      sslConnector.setKeystoreType("PKCS12");
+      sslConnector.setTruststoreType("PKCS12");
+      sslConnector.setNeedClientAuth(true);
+
+      server.addConnector(sslConnector);
+
+      ServletHolder sh = new ServletHolder(ServletContainer.class);
+      sh.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
+              "com.sun.jersey.api.core.PackagesResourceConfig");
+      sh.setInitParameter("com.sun.jersey.config.property.packages",
+              "org.apache.ambari.server.api.rest");
+      root.addServlet(sh, "/api/*");
+      sh.setInitOrder(2);
+
+      ServletHolder agent = new ServletHolder(ServletContainer.class);
+      agent.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
+              "com.sun.jersey.api.core.PackagesResourceConfig");
+      agent.setInitParameter("com.sun.jersey.config.property.packages",
+              "org.apache.ambari.server.agent.rest");
+      root.addServlet(agent, "/agent/*");
+      agent.setInitOrder(3);
+
+      ServletHolder cert = new ServletHolder(ServletContainer.class);
+      cert.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
+              "com.sun.jersey.api.core.PackagesResourceConfig");
+      cert.setInitParameter("com.sun.jersey.config.property.packages",
+              "org.apache.ambari.server.security.unsecured.rest");
+      root.addServlet(cert, "/cert/*");
+      cert.setInitOrder(4);
+
+      server.setStopAtShutdown(true);
+
+      springAppContext.start();
+      /*
+       * Start the server after controller state is recovered.
+       */
+      server.start();
+      LOG.info("Started Server");
+      server.join();
+      LOG.info("Joined the Server");
+    } catch (Exception e) {
+      LOG.error("Error in the server", e);
+    }
+  }
+
+  public void stop() throws Exception {
+    try {
+      server.stop();
+    } catch (Exception e) {
+      LOG.error("Error stopping the server", e);
+    }
+  }
+
+  public static void main(String[] args) throws IOException {
+    Injector injector = Guice.createInjector(new ControllerModule(), new JpaPersistModule(PERSISTENCE_PROVIDER));
+
+    try {
+      LOG.info("Getting the controller");
+      AmbariServer server = injector.getInstance(AmbariServer.class);
+      CertificateManager certMan = injector.getInstance(CertificateManager.class);
+      injector.getInstance(GuiceJpaInitializer.class);
+      certMan.initRootCert();
+      if (server != null) {
+        server.run();
+      }
+    } catch (Throwable t) {
+      LOG.error("Failed to run the Ambari Server", t);
+    }
+  }
 }

+ 2 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java

@@ -17,6 +17,7 @@
  */
 package org.apache.ambari.server.controller;
 import org.apache.ambari.server.agent.rest.AgentResource;
+import org.apache.ambari.server.security.unsecured.rest.CertificateDownload;
 
 import com.google.inject.AbstractModule;
 
@@ -29,5 +30,6 @@ public class ControllerModule extends AbstractModule {
   @Override
   protected void configure() {
     requestStaticInjection(AgentResource.class);
+    requestStaticInjection(CertificateDownload.class);
   }
 }

+ 55 - 0
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleDAO.java

@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.orm.dao;
+
+import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.google.inject.persist.Transactional;
+import org.apache.ambari.server.orm.entities.RoleEntity;
+
+import javax.persistence.EntityManager;
+
+public class RoleDAO {
+
+  @Inject
+  Provider<EntityManager> entityManagerProvider;
+
+  public RoleEntity findByName(String roleName) {
+    return entityManagerProvider.get().find(RoleEntity.class, roleName);
+  }
+
+  public void create(RoleEntity roleName) {
+    entityManagerProvider.get().persist(roleName);
+  }
+
+  @Transactional
+  public RoleEntity merge(RoleEntity roleName) {
+    return entityManagerProvider.get().merge(roleName);
+  }
+
+  @Transactional
+  public void remove(RoleEntity roleName) {
+    entityManagerProvider.get().remove(roleName);
+  }
+
+  @Transactional
+  public void removeByName(String roleName) {
+    remove(findByName(roleName));
+  }
+
+}

+ 55 - 0
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/UserDAO.java

@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.orm.dao;
+
+import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.google.inject.persist.Transactional;
+import org.apache.ambari.server.orm.entities.UserEntity;
+
+import javax.persistence.EntityManager;
+
+public class UserDAO {
+
+  @Inject
+  Provider<EntityManager> entityManagerProvider;
+
+  public UserEntity findByName(String userName) {
+    return entityManagerProvider.get().find(UserEntity.class, userName);
+  }
+
+  public void create(UserEntity userName) {
+    entityManagerProvider.get().persist(userName);
+  }
+
+  @Transactional
+  public UserEntity merge(UserEntity userName) {
+    return entityManagerProvider.get().merge(userName);
+  }
+
+  @Transactional
+  public void remove(UserEntity userName) {
+    entityManagerProvider.get().remove(userName);
+  }
+
+  @Transactional
+  public void removeByName(String userName) {
+    remove(findByName(userName));
+  }
+
+}

+ 51 - 0
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleEntity.java

@@ -0,0 +1,51 @@
+package org.apache.ambari.server.orm.entities;
+
+import javax.persistence.Entity;
+import javax.persistence.Id;
+import javax.persistence.ManyToMany;
+import java.util.Set;
+
+@javax.persistence.Table(name = "roles", schema = "ambari", catalog = "")
+@Entity
+public class RoleEntity {
+
+  private String roleName;
+
+  @javax.persistence.Column(name = "role_name")
+  @Id
+  public String getRoleName() {
+    return roleName;
+  }
+
+  public void setRoleName(String roleName) {
+    this.roleName = roleName;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+
+    RoleEntity that = (RoleEntity) o;
+
+    if (roleName != null ? !roleName.equals(that.roleName) : that.roleName != null) return false;
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    return roleName != null ? roleName.hashCode() : 0;
+  }
+
+  private Set<UserEntity> userEntities;
+
+  @ManyToMany(mappedBy = "roleEntities")
+  public Set<UserEntity> getUserEntities() {
+    return userEntities;
+  }
+
+  public void setUserEntities(Set<UserEntity> userEntities) {
+    this.userEntities = userEntities;
+  }
+}

+ 94 - 0
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UserEntity.java

@@ -0,0 +1,94 @@
+package org.apache.ambari.server.orm.entities;
+
+import javax.persistence.*;
+import java.sql.Timestamp;
+import java.util.Set;
+
+@javax.persistence.Table(name = "users", schema = "ambari", catalog = "")
+@Entity
+public class UserEntity {
+
+  private String userName;
+
+  @javax.persistence.Column(name = "user_name")
+  @Id
+  public String getUserName() {
+    return userName;
+  }
+
+  public void setUserName(String userName) {
+    this.userName = userName;
+  }
+
+  private String userPassword;
+
+  @javax.persistence.Column(name = "user_password")
+  @Basic
+  public String getUserPassword() {
+    return userPassword;
+  }
+
+  public void setUserPassword(String userPassword) {
+    this.userPassword = userPassword;
+  }
+
+  private Boolean ldapUser;
+
+  @javax.persistence.Column(name = "ldap_user")
+  @Basic
+  public Boolean getLdapUser() {
+    return ldapUser;
+  }
+
+  public void setLdapUser(Boolean ldapUser) {
+    this.ldapUser = ldapUser;
+  }
+
+  private Timestamp createTime;
+
+  @javax.persistence.Column(name = "create_time")
+  @Basic
+  public Timestamp getCreateTime() {
+    return createTime;
+  }
+
+  public void setCreateTime(Timestamp createTime) {
+    this.createTime = createTime;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+
+    UserEntity that = (UserEntity) o;
+
+    if (createTime != null ? !createTime.equals(that.createTime) : that.createTime != null) return false;
+    if (ldapUser != null ? !ldapUser.equals(that.ldapUser) : that.ldapUser != null) return false;
+    if (userName != null ? !userName.equals(that.userName) : that.userName != null) return false;
+    if (userPassword != null ? !userPassword.equals(that.userPassword) : that.userPassword != null) return false;
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = userName != null ? userName.hashCode() : 0;
+    result = 31 * result + (userPassword != null ? userPassword.hashCode() : 0);
+    result = 31 * result + (ldapUser != null ? ldapUser.hashCode() : 0);
+    result = 31 * result + (createTime != null ? createTime.hashCode() : 0);
+    return result;
+  }
+
+  private Set<RoleEntity> roleEntities;
+
+  @javax.persistence.JoinTable(name = "user_roles", catalog = "", schema = "ambari", joinColumns = {@JoinColumn(name = "user_name")}, inverseJoinColumns = {@JoinColumn(name = "user_name")})
+  @ManyToMany
+  public Set<RoleEntity> getRoleEntities() {
+    return roleEntities;
+  }
+
+  public void setRoleEntities(Set<RoleEntity> roleEntities) {
+    this.roleEntities = roleEntities;
+  }
+}

+ 96 - 0
ambari-server/src/main/java/org/apache/ambari/server/security/AmbariUserDetailsService.java

@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.security;
+
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.orm.dao.UserDAO;
+import org.apache.ambari.server.orm.entities.RoleEntity;
+import org.apache.ambari.server.orm.entities.UserEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.security.core.GrantedAuthority;
+import org.springframework.security.core.authority.SimpleGrantedAuthority;
+import org.springframework.security.core.userdetails.User;
+import org.springframework.security.core.userdetails.UserDetails;
+import org.springframework.security.core.userdetails.UserDetailsService;
+import org.springframework.security.core.userdetails.UsernameNotFoundException;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class AmbariUserDetailsService implements UserDetailsService {
+  private static final Logger log = LoggerFactory.getLogger(AmbariUserDetailsService.class);
+
+  Injector injector;
+  Configuration configuration;
+  UserDAO userDAO;
+
+
+  @Inject
+  public AmbariUserDetailsService(Injector injector, Configuration configuration, UserDAO userDAO) {
+    this.injector = injector;
+    this.configuration = configuration;
+    this.userDAO = userDAO;
+  }
+
+  /**
+   * Loads Spring Security UserDetails from identity storage according to Configuration
+   * @param username username
+   * @return UserDetails
+   * @throws UsernameNotFoundException when the user is not found or has no roles
+   */
+  @Override
+  public UserDetails loadUserByUsername(String username) throws UsernameNotFoundException {
+    log.info("Loading user by name: " + username);
+
+    UserEntity user = null;
+    try {
+      user = userDAO.findByName(username);
+    } catch (Exception e) {
+      System.err.println(e);
+    }
+
+    if (isLdapEnabled() && (user == null || user.getLdapUser())) {
+      //TODO implement LDAP
+    }
+
+    if (user == null) {
+      log.info("user not found ");
+      throw new UsernameNotFoundException("Username " + username + " not found");
+    } else if (user.getRoleEntities().isEmpty()) {
+      log.info("User " + username + " has no roles");
+      throw new UsernameNotFoundException("Username " + username + " has no roles");
+    }
+
+    List<GrantedAuthority> authorities = new ArrayList<GrantedAuthority>(user.getRoleEntities().size());
+
+    System.err.println("Authorities number = " + user.getRoleEntities().size());
+    for (RoleEntity roleEntity : user.getRoleEntities()) {
+      authorities.add(new SimpleGrantedAuthority(roleEntity.getRoleName().toUpperCase()));
+    }
+
+    return new User(user.getUserName(), user.getUserPassword(), authorities);
+  }
+
+  private boolean isLdapEnabled() {
+    return ClientSecurityType.fromString(configuration.getConfigsMap().get(Configuration.CLIENT_SECURITY_KEY)) == ClientSecurityType.LDAP;
+  }
+
+}
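
The service above turns role names from the database into upper-cased Spring Security authorities. Below is a hypothetical sketch (not part of this commit) of exercising loadUserByUsername in isolation with Mockito; it assumes UserEntity/RoleEntity have a no-arg constructor and the setters implied by the getters used above (setUserName, setUserPassword, setRoleName), and that Configuration is mockable.

package org.apache.ambari.server.security;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Collections;

import org.apache.ambari.server.configuration.Configuration;
import org.apache.ambari.server.orm.dao.UserDAO;
import org.apache.ambari.server.orm.entities.RoleEntity;
import org.apache.ambari.server.orm.entities.UserEntity;
import org.springframework.security.core.userdetails.UserDetails;

public class AmbariUserDetailsServiceSketch {
  public static void main(String[] args) {
    RoleEntity role = new RoleEntity();
    role.setRoleName("admin");

    UserEntity user = new UserEntity();
    user.setUserName("admin");
    user.setUserPassword("secret");
    user.setRoleEntities(Collections.singleton(role));

    UserDAO userDAO = mock(UserDAO.class);
    when(userDAO.findByName("admin")).thenReturn(user);

    // "local" client security keeps the LDAP branch out of the picture.
    Configuration configuration = mock(Configuration.class);
    when(configuration.getConfigsMap()).thenReturn(
        Collections.singletonMap(Configuration.CLIENT_SECURITY_KEY, "local"));

    // The Injector is not used on the local-user path, so null is passed here.
    AmbariUserDetailsService service =
        new AmbariUserDetailsService(null, configuration, userDAO);
    UserDetails details = service.loadUserByUsername("admin");

    // Role names are upper-cased, which is what the hasRole('ADMIN') rule in
    // spring-security.xml checks against.
    System.out.println(details.getAuthorities()); // e.g. [ADMIN]
  }
}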

+ 164 - 0
ambari-server/src/main/java/org/apache/ambari/server/security/CertificateManager.java

@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.security;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.text.MessageFormat;
+import java.util.Map;
+
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+
+/**
+ * Ambari security.
+ * Manages server and agent certificates
+ */
+@Singleton
+public class CertificateManager {
+
+  @Inject Configuration configs;
+
+  private static final Log LOG = LogFactory.getLog(CertificateManager.class);
+
+
+  private static final String GEN_SRVR_KEY = "openssl genrsa -des3 -passout pass:{0} -out {1}/{2} 4096 ";
+  private static final String GEN_SRVR_REQ = "openssl req -passin pass:{0} -new -key {1}/{2} -out {1}/{3} -batch";
+  private static final String SIGN_SRVR_CRT = "openssl x509 -passin pass:{0} -req -days 365 -in {1}/{3} -signkey {1}/{2} -out {1}/{3} \n";
+  private static final String EXPRT_KSTR = "openssl pkcs12 -export -in {1}/{3} -inkey {1}/{2} -certfile {1}/{3} -out {1}/{4} -password pass:{0} -passin pass:{0} \n";
+
+  /**
+   * Verify that the root certificate exists; generate it otherwise.
+   */
+  public void initRootCert() {
+    LOG.info("Initialization of root certificate");
+
+    boolean certExists = isCertExists();
+
+    LOG.info("Certificate exists:" + certExists);
+
+    if (!certExists) {
+      generateServerCertificate();
+    }
+  }
+  }
+	
+  /**
+   * Checks root certificate state.
+   * @return "true" if certificate exists
+   */
+  private boolean isCertExists() {
+
+    Map<String, String> configsMap = configs.getConfigsMap();
+    String srvrKstrDir = configsMap.get(Configuration.SRVR_KSTR_DIR_KEY);
+    String srvrCrtName = configsMap.get(Configuration.SRVR_CRT_NAME_KEY);
+    File certFile = new File(srvrKstrDir + File.separator + srvrCrtName);
+
+    return certFile.exists();
+  }
+  
+  
+  /**
+   * Runs os command
+   */
+  private void runCommand(String command) {
+
+    String line = null;
+    Process process = null;
+    try {
+      process = Runtime.getRuntime().exec(command);
+      BufferedReader br = new BufferedReader(new InputStreamReader(process.getInputStream()));
+
+      while ((line = br.readLine()) != null) {
+        LOG.info(line);
+      }
+
+      try {
+        process.waitFor();
+      }
+      catch (InterruptedException e) {
+        LOG.error("Interrupted while waiting for the process to finish", e);
+      }
+    }
+    catch (IOException e) {
+      LOG.error("Error executing command: " + command, e);
+    }
+
+  }
+  
+  private void generateServerCertificate() {
+    LOG.info("Generation of server certificate");
+
+    Map<String, String> configsMap = configs.getConfigsMap();
+    String srvrKstrDir = configsMap.get(Configuration.SRVR_KSTR_DIR_KEY);
+    String srvrCrtName = configsMap.get(Configuration.SRVR_CRT_NAME_KEY);
+    String srvrKeyName = configsMap.get(Configuration.SRVR_KEY_NAME_KEY);
+    String kstrName = configsMap.get(Configuration.KSTR_NAME_KEY);
+    String srvrCrtPass = configsMap.get(Configuration.SRVR_CRT_PASS_KEY);
+
+    Object[] scriptArgs = {srvrCrtPass, srvrKstrDir, srvrKeyName,
+                           srvrCrtName, kstrName};
+
+    String command = MessageFormat.format(GEN_SRVR_KEY, scriptArgs);
+
+    LOG.info("Executing command:" + command);
+
+    runCommand(command);
+
+    command = MessageFormat.format(GEN_SRVR_REQ, scriptArgs);
+
+    LOG.info("Executing command:" + command);
+
+    runCommand(command);
+
+    command = MessageFormat.format(SIGN_SRVR_CRT, scriptArgs);
+
+    LOG.info("Executing command:" + command);
+
+    runCommand(command);
+
+    command = MessageFormat.format(EXPRT_KSTR, scriptArgs);
+
+    LOG.info("Executing command:" + command);
+
+    runCommand(command);
+
+  }
+
+  /**
+   * Returns server certificate content
+   * @return string with server certificate content
+   */
+  public String getServerCert() {
+    Map<String, String> configsMap = configs.getConfigsMap();
+    File certFile = new File(configsMap.get(Configuration.SRVR_KSTR_DIR_KEY) + File.separator + configsMap.get(Configuration.SRVR_CRT_NAME_KEY));
+    String srvrCrtContent = null;
+    try {
+      srvrCrtContent = FileUtils.readFileToString(certFile);
+    } catch (IOException e) {
+      LOG.error(e.getMessage(), e);
+    }
+    return srvrCrtContent;
+  }
+}
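
The four openssl command templates above are expanded with java.text.MessageFormat before being run: {0} is the certificate password, {1} the keystore directory, {2} the server key file, {3} the server certificate file and {4} the exported PKCS#12 keystore. A minimal sketch of that expansion, using hypothetical example values (not taken from the actual configuration):

package org.apache.ambari.server.security;

import java.text.MessageFormat;

public class CertCommandSketch {
  public static void main(String[] args) {
    // Hypothetical values for {0}..{4}; the real ones come from Configuration.
    Object[] scriptArgs = {"bigdata", "/var/lib/ambari-server/keys",
                           "ca.key", "ca.crt", "keystore.p12"};
    String genKey = "openssl genrsa -des3 -passout pass:{0} -out {1}/{2} 4096 ";
    // Prints: openssl genrsa -des3 -passout pass:bigdata -out /var/lib/ambari-server/keys/ca.key 4096
    System.out.println(MessageFormat.format(genKey, scriptArgs));
  }
}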

+ 26 - 0
ambari-server/src/main/java/org/apache/ambari/server/security/ClientSecurityType.java

@@ -0,0 +1,26 @@
+package org.apache.ambari.server.security;
+
+public enum ClientSecurityType {
+  LOCAL("local"),
+  LDAP("ldap");
+
+  private String value;
+  ClientSecurityType(String value) {
+    this.value = value;
+  }
+
+  public static ClientSecurityType fromString(String value) {
+    for (ClientSecurityType securityType : ClientSecurityType.values()) {
+      if (securityType.toString().equals(value)) {
+        return securityType;
+      }
+    }
+    return null;
+  }
+
+
+  @Override
+  public String toString() {
+    return value;
+  }
+}
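
fromString matches the lower-case wire value of each constant and returns null for anything unrecognised, which callers such as isLdapEnabled() above simply treat as "not LDAP". A quick illustration:

package org.apache.ambari.server.security;

public class ClientSecurityTypeSketch {
  public static void main(String[] args) {
    System.out.println(ClientSecurityType.fromString("ldap"));     // ldap
    System.out.println(ClientSecurityType.fromString("local"));    // local
    System.out.println(ClientSecurityType.fromString("kerberos")); // null (unknown value)
  }
}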

+ 47 - 0
ambari-server/src/main/java/org/apache/ambari/server/security/unsecured/rest/CertificateDownload.java

@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.security.unsecured.rest;
+
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+
+import org.apache.ambari.server.security.CertificateManager;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import com.google.inject.Inject;
+
+@Path("/ca")
+public class CertificateDownload {
+  private static Log LOG = LogFactory.getLog(CertificateDownload.class);
+  private static CertificateManager certMan;
+  
+  @Inject
+  static void init(CertificateManager instance) {
+    certMan = instance;
+  }
+  
+  @GET
+  @Produces({MediaType.TEXT_PLAIN})
+  public String downloadSrvrCrt() {
+    return certMan.getServerCert();
+  }
+}
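
The unsecured /ca resource lets an agent download the server certificate as plain text before any trust is established. A hypothetical client-side sketch (not part of this commit); the base URL is an assumption, since the port and path prefix depend on how the REST application is mounted:

package org.apache.ambari.server.security.unsecured.rest;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class CertificateDownloadClientSketch {
  public static void main(String[] args) throws Exception {
    // Assumed server address; adjust to the actual Ambari server binding.
    URL url = new URL("http://ambari-server.example.com:8080/ca");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "text/plain");

    BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
    StringBuilder cert = new StringBuilder();
    String line;
    while ((line = reader.readLine()) != null) {
      cert.append(line).append('\n');
    }
    reader.close();
    // The body is whatever CertificateManager.getServerCert() read from disk.
    System.out.println(cert);
  }
}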

+ 5 - 1
ambari-server/src/main/resources/META-INF/persistence.xml

@@ -33,9 +33,11 @@
     <class>org.apache.ambari.server.orm.entities.ServiceConfigEntity</class>
     <class>org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity</class>
     <class>org.apache.ambari.server.orm.entities.ServiceStateEntity</class>
+    <class>org.apache.ambari.server.orm.entities.RoleEntity</class>
+    <class>org.apache.ambari.server.orm.entities.UserEntity</class>
 
     <properties>
-      <property name="javax.persistence.jdbc.url" value="jdbc:postgresql://localhost/ambari"/>
+      <property name="javax.persistence.jdbc.url" value="jdbc:postgresql://localhost/postgres"/>
       <property name="javax.persistence.jdbc.driver" value="org.postgresql.Driver"/>
       <property name="javax.persistence.jdbc.user" value="ambari-server"/>
       <property name="javax.persistence.jdbc.password" value="bigdata"/>
@@ -58,6 +60,8 @@
     <class>org.apache.ambari.server.orm.entities.ServiceConfigEntity</class>
     <class>org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity</class>
     <class>org.apache.ambari.server.orm.entities.ServiceStateEntity</class>
+    <class>org.apache.ambari.server.orm.entities.RoleEntity</class>
+    <class>org.apache.ambari.server.orm.entities.UserEntity</class>
 
     <properties>
       <property name="javax.persistence.jdbc.url" value="jdbc:derby:memory:myDB;create=true"/>

+ 1 - 0
ambari-server/src/main/resources/pass.txt

@@ -0,0 +1 @@
+QWERTYUIO

+ 35 - 0
ambari-server/src/main/resources/users.ldif

@@ -0,0 +1,35 @@
+dn: ou=groups,dc=ambari,dc=apache,dc=org
+objectclass:top
+objectclass:organizationalUnit
+ou: groups
+
+dn: ou=people,dc=ambari,dc=apache,dc=org
+objectclass:top
+objectclass:organizationalUnit
+ou: people
+
+dn: uid=allowedUser,ou=people,dc=ambari,dc=apache,dc=org
+objectclass:top
+objectclass:person
+objectclass:organizationalPerson
+objectclass:inetOrgPerson
+cn: CraigWalls
+sn: Walls
+uid: allowedUser
+userPassword:password
+
+dn: uid=deniedUser,ou=people,dc=ambari,dc=apache,dc=org
+objectclass:top
+objectclass:person
+objectclass:organizationalPerson
+objectclass:inetOrgPerson
+cn: JohnSmith
+sn: Smith
+uid: deniedUser
+userPassword:password
+
+dn: cn=admin,ou=groups,dc=ambari,dc=apache,dc=org
+objectclass:top
+objectclass:groupOfNames
+cn: admin
+member: uid=allowedUser,ou=people,dc=ambari,dc=apache,dc=org

+ 52 - 0
ambari-server/src/main/resources/webapp/WEB-INF/spring-security.xml

@@ -0,0 +1,52 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<beans:beans xmlns="http://www.springframework.org/schema/security"
+             xmlns:beans="http://www.springframework.org/schema/beans"
+             xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+             xsi:schemaLocation="http://www.springframework.org/schema/beans
+                    http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
+                    http://www.springframework.org/schema/security
+                    http://www.springframework.org/schema/security/spring-security-3.1.xsd">
+
+  <http use-expressions="true" auto-config="true"
+        disable-url-rewriting="true"
+          >
+    <http-basic/>
+    <intercept-url pattern="/api/*" access="hasRole('ADMIN')"/>
+    <intercept-url pattern="/**" access="isAuthenticated()"/>
+  </http>
+
+  <!--<ldap-server id="ldapServer" root="dc=ambari,dc=apache,dc=org"/>-->
+
+  <authentication-manager>
+
+    <authentication-provider user-service-ref="ambariUserService">
+
+    </authentication-provider>
+
+  </authentication-manager>
+
+  <beans:bean id="ambariUserService"
+              class="org.springframework.security.core.userdetails.UserDetailsService"
+              factory-bean="guiceInjector"
+              factory-method="getInstance"
+              lazy-init="true">
+    <beans:constructor-arg type="java.lang.Class" value="org.apache.ambari.server.security.AmbariUserDetailsService"/>
+  </beans:bean>
+
+
+</beans:beans>

+ 33 - 0
ambari-server/src/main/resources/webapp/WEB-INF/web.xml

@@ -0,0 +1,33 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<web-app version="2.5" xmlns="http://java.sun.com/xml/ns/javaee"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
+            http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd">
+
+  <display-name>Ambari-web</display-name>
+
+  <filter>
+    <filter-name>springSecurityFilterChain</filter-name>
+    <filter-class>org.springframework.web.filter.DelegatingFilterProxy</filter-class>
+  </filter>
+
+  <filter-mapping>
+    <filter-name>springSecurityFilterChain</filter-name>
+    <url-pattern>/*</url-pattern>
+  </filter-mapping>
+</web-app>

+ 26 - 0
ambari-server/src/main/resources/webapp/index.html

@@ -0,0 +1,26 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+        "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+<head>
+    <title>AMBARI TEST PAGE</title>
+</head>
+<body>
+<h1>AMBARI TEST PAGE</h1>
+</body>
+</html>