Selaa lähdekoodia

AMBARI-8949. Add support for Ranger. (Gautam Borad via yusaku)

Yusaku Sako 10 vuotta sitten
vanhempi
commit
1052efd85d
48 muutettua tiedostoa jossa 2767 lisäystä ja 27 poistoa
  1. 221 0
      ambari-common/src/main/python/resource_management/libraries/functions/ranger_functions.py
  2. 1 0
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
  3. 2 2
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
  4. 2 1
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py
  5. 13 0
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py
  6. 187 0
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py
  7. 1 0
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
  8. 2 1
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
  9. 13 0
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
  10. 191 0
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
  11. 1 0
      ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml
  12. 2 1
      ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
  13. 12 0
      ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py
  14. 182 0
      ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
  15. 156 0
      ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
  16. 1 0
      ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/metainfo.xml
  17. 2 0
      ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
  18. 12 0
      ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params.py
  19. 183 0
      ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py
  20. 180 0
      ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml
  21. 49 0
      ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/ranger-env.xml
  22. 103 0
      ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/usersync-properties.xml
  23. 84 0
      ambari-server/src/main/resources/common-services/RANGER/0.4.0/metainfo.xml
  24. 43 0
      ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
  25. 58 0
      ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
  26. 56 0
      ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_usersync.py
  27. 53 0
      ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/service_check.py
  28. 153 0
      ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger.py
  29. 2 0
      ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
  30. 150 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
  31. 156 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
  32. 163 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml
  33. 29 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/RANGER/metainfo.xml
  34. 109 1
      ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
  35. 6 0
      ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
  36. 6 0
      ambari-server/src/test/python/stacks/2.0.6/configs/default.json
  37. 3 0
      ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
  38. 3 0
      ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
  39. 3 0
      ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
  40. 3 0
      ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
  41. 3 0
      ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
  42. 3 0
      ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
  43. 6 0
      ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
  44. 3 0
      ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
  45. 145 18
      ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
  46. 7 3
      ambari-server/src/test/python/stacks/2.2/configs/default.json
  47. 3 0
      ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
  48. 1 0
      ambari-web/app/config.js

+ 221 - 0
ambari-common/src/main/python/resource_management/libraries/functions/ranger_functions.py

@@ -0,0 +1,221 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import time
+import sys
+from StringIO import StringIO as BytesIO
+import json
+from resource_management.core.logger import Logger
+import urllib2, base64, httplib
+
+class Rangeradmin:
+  sInstance = None
+  def __init__(self, url= 'http://localhost:6080'):
+    
+    self.baseUrl      =  url 
+    self.urlLogin     = self.baseUrl + '/login.jsp'
+    self.urlLoginPost = self.baseUrl + '/j_spring_security_check'
+    self.urlRepos     = self.baseUrl + '/service/assets/assets'
+    self.urlReposPub  = self.baseUrl + '/service/public/api/repository'
+    self.urlPolicies  = self.baseUrl + '/service/public/api/policy'
+    self.urlGroups    = self.baseUrl + '/service/xusers/groups'
+    self.urlUsers     = self.baseUrl + '/service/xusers/users'   
+    self.urlSecUsers  = self.baseUrl + '/service/xusers/secure/users'   
+
+    self.session    = None
+    self.isLoggedIn = False
+
+  def get_repository_by_name_urllib2(self, name, component, status, usernamepassword):
+    try:
+      searchRepoURL = self.urlReposPub + "?name=" + name + "&type=" + component + "&status=" + status
+      request = urllib2.Request(searchRepoURL)
+      base64string = base64.encodestring(usernamepassword).replace('\n', '')
+      request.add_header("Content-Type", "application/json")   
+      request.add_header("Accept", "application/json")  
+      request.add_header("Authorization", "Basic %s" % base64string)   
+      result = urllib2.urlopen(request)
+      response_code =  result.getcode()
+      response = json.loads(result.read())
+
+      if response_code == 200 and len(response['vXRepositories']) > 0:
+        for repo in response['vXRepositories']:
+          repoDump = json.loads(json.JSONEncoder().encode(repo))
+          if repoDump['name'] == name:
+            return repoDump
+        return None
+      else:
+        return None
+    except urllib2.URLError, e:
+      if isinstance(e, urllib2.HTTPError):
+          Logger.error("HTTP Code: %s" % e.code)
+          Logger.error("HTTP Data: %s" % e.read())
+      else:
+          Logger.error("Error : %s" % (e.reason))
+      return None
+    except httplib.BadStatusLine:
+      Logger.error("Ranger Admin service is not reachable, please restart the service and then try again")
+      return None
+
+  def create_repository_urllib2(self, data, usernamepassword):
+    try:
+      searchRepoURL = self.urlReposPub
+      base64string = base64.encodestring('%s' % (usernamepassword)).replace('\n', '')
+      headers = {
+        'Accept': 'application/json',
+        "Content-Type": "application/json"
+      }
+      request = urllib2.Request(searchRepoURL, data, headers)
+      request.add_header("Authorization", "Basic %s" % base64string)   
+      result = urllib2.urlopen(request)
+      response_code =  result.getcode()
+      response = json.loads(json.JSONEncoder().encode(result.read()))
+      if response_code == 200 :
+        Logger.info('Repository created Successfully')
+        #Get Policies 
+        repoData     = json.loads(data)
+        repoName     = repoData['name']
+        typeOfPolicy = repoData['repositoryType']
+        ##Get Policies by repo name
+        policyList = self.get_policy_by_repo_name(name=repoName, component=typeOfPolicy, status="true", usernamepassword=usernamepassword)
+        if (len(policyList)) > 0 : 
+          policiesUpdateCount = 0
+          for policy in policyList:
+            updatedPolicyObj = self.get_policy_params(typeOfPolicy,policy)
+            policyResCode, policyResponse = self.update_ranger_policy(updatedPolicyObj['id'], json.dumps(updatedPolicyObj), usernamepassword)
+            if policyResCode == 200:
+              policiesUpdateCount = policiesUpdateCount+1
+            else:
+              Logger.info('Policy Update failed')  
+          ##Check for count of updated policies
+          if len(policyList) == policiesUpdateCount:
+            Logger.info("Ranger Repository created successfully and policies updated successfully providing ambari-qa user all permissions")
+            return response
+          else:
+            return None
+        else:
+          Logger.info("Policies not found for the newly created Repository")
+        return  None
+      else:
+        Logger.info('Repository creation failed')
+        return None  
+    except urllib2.URLError, e:
+      if isinstance(e, urllib2.HTTPError):
+          Logger.error("HTTP Code: %s" % e.code)
+          Logger.error("HTTP Data: %s" % e.read())
+      else:
+          Logger.error("Error: %s" % (e.reason))
+      return None
+    except httplib.BadStatusLine:
+      Logger.error("Ranger Admin service is not reachable, please restart the service and then try again")
+      return None
+
+  def check_ranger_login_urllib2(self, url,usernamepassword ):
+    try:
+      request = urllib2.Request(url)
+      base64string = base64.encodestring(usernamepassword).replace('\n', '')
+      request.add_header("Content-Type", "application/json")   
+      request.add_header("Accept", "application/json")  
+      request.add_header("Authorization", "Basic %s" % base64string)   
+      result = urllib2.urlopen(request)
+      response = result.read()
+      response_code =  result.getcode()
+      return response_code, response
+    except urllib2.URLError, e:
+      if isinstance(e, urllib2.HTTPError):
+          Logger.error("HTTP Code: %s" % e.code)
+          Logger.error("HTTP Data: %s" % e.read())
+      else:
+          Logger.error("Error : %s" % (e.reason))
+      return None, None
+    except httplib.BadStatusLine, e:
+      Logger.error("Ranger Admin service is not reachable, please restart the service and then try again")
+      return None, None      
+
+  def get_policy_by_repo_name(self, name, component, status, usernamepassword):
+    try:
+      searchPolicyURL = self.urlPolicies + "?repositoryName=" + name + "&repositoryType=" + component + "&isEnabled=" + status
+      request = urllib2.Request(searchPolicyURL)
+      base64string = base64.encodestring(usernamepassword).replace('\n', '')
+      request.add_header("Content-Type", "application/json")   
+      request.add_header("Accept", "application/json")  
+      request.add_header("Authorization", "Basic %s" % base64string)   
+      result = urllib2.urlopen(request)
+      response_code =  result.getcode()
+      response = json.loads(result.read())
+      if response_code == 200 and len(response['vXPolicies']) > 0:
+          return response['vXPolicies']
+      else:
+        return None
+    except urllib2.URLError, e:
+      if isinstance(e, urllib2.HTTPError):
+          Logger.error("HTTP Code: %s" % e.code)
+          Logger.error("HTTP Data: %s" % e.read())
+      else:
+          Logger.error("Error: %s" % (e.reason))
+      return None
+    except httplib.BadStatusLine:
+      Logger.error("Ranger Admin service is not reachable, please restart the service and then try again")
+      return None
+
+  def update_ranger_policy(self, policyId, data, usernamepassword):
+    try:
+      searchRepoURL = self.urlPolicies +"/"+str(policyId)
+      base64string = base64.encodestring('%s' % (usernamepassword)).replace('\n', '')
+      headers = {
+        'Accept': 'application/json',
+        "Content-Type": "application/json"
+      }
+      request = urllib2.Request(searchRepoURL, data, headers)
+      request.add_header("Authorization", "Basic %s" % base64string)   
+      request.get_method = lambda: 'PUT'
+      result = urllib2.urlopen(request)
+      response_code =  result.getcode()
+      response = json.loads(json.JSONEncoder().encode(result.read()))
+      if response_code == 200 :
+        Logger.info('Policy updated Successfully')
+        return response_code, response
+      else:
+        Logger.error('Update Policy failed')
+        return None, None
+    except urllib2.URLError, e:
+      if isinstance(e, urllib2.HTTPError):
+          Logger.error("HTTP Code: %s" % e.code)
+          Logger.error("HTTP Data: %s" % e.read())
+      else:
+          Logger.error("Error: %s" % (e.reason))
+      return None, None
+    except httplib.BadStatusLine:
+      Logger.error("Ranger Admin service is not reachable, please restart the service and then try again")
+      return None, None
+
+  def get_policy_params(self, typeOfPolicy,policyObj): 
+    
+    typeOfPolicy = typeOfPolicy.lower()
+    if typeOfPolicy == "hdfs":
+      policyObj['permMapList'] = [{'userList':['ambari-qa'],'permList':  ['Read','Write', 'Execute', 'Admin']}]
+    elif typeOfPolicy == "hive":
+      policyObj['permMapList'] = [{'userList':['ambari-qa'], 'permList':[ 'Select','Update', 'Create', 'Drop', 'Alter', 'Index', 'Lock', 'All', 'Admin' ]}]
+    elif typeOfPolicy == "hbase":
+      policyObj['permMapList'] = [{'userList':['ambari-qa'],'permList':[ 'Read', 'Write', 'Create', 'Admin']}]
+    elif typeOfPolicy == "knox":
+      policyObj['permMapList'] = [{'userList':['ambari-qa'], 'permList': ['Allow','Admin']}]
+    elif typeOfPolicy == "storm" : 
+      policyObj['permMapList'] = [{'userList':['ambari-qa'], 'permList':[ 'Submit Topology', 'File Upload', 'Get Nimbus Conf', 'Get Cluster Info', 'File Download', 'Kill Topology', 'Rebalance', 'Activate','Deactivate', 'Get Topology Conf', 'Get Topology', 'Get User Topology', 'Get Topology Info', 'Upload New Credential', 'Admin']}]
+    return policyObj
+

+ 1 - 0
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml

@@ -137,6 +137,7 @@
         <config-type>hbase-site</config-type>
         <config-type>hbase-env</config-type>
         <config-type>hbase-log4j</config-type>
+        <config-type>ranger-hbase-plugin-properties</config-type>
       </configuration-dependencies>
 
     </service>

+ 2 - 2
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py

@@ -27,7 +27,7 @@ from hbase import hbase
 from hbase_service import hbase_service
 from hbase_decommission import hbase_decommission
 import upgrade
-
+from setup_ranger_hbase import setup_ranger_hbase
          
 class HbaseMaster(Script):
 
@@ -52,7 +52,7 @@ class HbaseMaster(Script):
     import params
     env.set_params(params)
     self.configure(env) # for security
-
+    setup_ranger_hbase(env)  
     hbase_service( 'master',
       action = 'start'
     )

+ 2 - 1
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py

@@ -26,6 +26,7 @@ from resource_management.libraries.functions.security_commons import build_expec
 from hbase import hbase
 from hbase_service import hbase_service
 import upgrade
+from setup_ranger_hbase import setup_ranger_hbase
 
          
 class HbaseRegionServer(Script):
@@ -56,7 +57,7 @@ class HbaseRegionServer(Script):
     import params
     env.set_params(params)
     self.configure(env) # for security
-
+    setup_ranger_hbase(env)  
     hbase_service( 'regionserver',
       action = 'start'
     )

+ 13 - 0
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py

@@ -162,3 +162,16 @@ if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
     region_mover = format("/usr/hdp/current/hbase-{role_root}/bin/region_mover.rb")
     region_drainer = format("/usr/hdp/current/hbase-{role_root}/bin/draining_servers.rb")
     hbase_cmd = format("/usr/hdp/current/hbase-{role_root}/bin/hbase")
+
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  # Setting Flag value for ranger hbase plugin
+  enable_ranger_hbase = False
+  user_input = config['configurations']['ranger-hbase-plugin-properties']['ranger-hbase-plugin-enabled']
+  if user_input.lower() == 'yes':
+    enable_ranger_hbase = True
+  elif user_input.lower() == 'no':
+    enable_ranger_hbase = False
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0    

+ 187 - 0
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py

@@ -0,0 +1,187 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+import fileinput
+import subprocess
+import json
+import re
+from resource_management import *
+from resource_management.libraries.functions.ranger_functions import Rangeradmin
+from resource_management.core.logger import Logger
+
+def setup_ranger_hbase(env):
+    import params
+    env.set_params(params)
+
+    if params.has_ranger_admin:
+        try:
+            command = 'hdp-select status hbase-client'
+            return_code, hdp_output = shell.call(command, timeout=20)
+        except Exception, e:
+            Logger.error(str(e))
+            raise Fail('Unable to execute hdp-select command to retrieve the version.')
+
+        if return_code != 0:
+            raise Fail('Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))
+
+        hdp_version = re.sub('hbase-client - ', '', hdp_output)
+        match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', hdp_version)
+
+        if match is None:
+            raise Fail('Failed to get extracted version')
+
+        file_path = '/usr/hdp/'+ hdp_version +'/ranger-hbase-plugin/install.properties'
+
+        ranger_hbase_dict = ranger_hbase_properties(params)
+        hbase_repo_data = hbase_repo_properties(params)
+
+        write_properties_to_file(file_path, ranger_hbase_dict)
+
+        if params.enable_ranger_hbase:
+            cmd = format('cd /usr/hdp/{hdp_version}/ranger-hbase-plugin/ && sh enable-hbase-plugin.sh')
+            ranger_adm_obj = Rangeradmin(url=ranger_hbase_dict['POLICY_MGR_URL'])
+            response_code, response_recieved = ranger_adm_obj.check_ranger_login_urllib2(ranger_hbase_dict['POLICY_MGR_URL'] + '/login.jsp', 'test:test')
+
+            if response_code is not None and response_code == 200:
+                repo = ranger_adm_obj.get_repository_by_name_urllib2(ranger_hbase_dict['REPOSITORY_NAME'], 'hbase', 'true', 'admin:admin')
+
+                if repo and repo['name'] == ranger_hbase_dict['REPOSITORY_NAME']:
+                    Logger.info('Hbase Repository exist')
+                else:
+                    response = ranger_adm_obj.create_repository_urllib2(hbase_repo_data, 'admin:admin')
+                    if response is not None:
+                        Logger.info('Hbase Repository created in Ranger admin')
+                    else:
+                        Logger.info('Hbase Repository creation failed in Ranger admin')
+            else:
+                Logger.info('Ranger service is not started on given host')
+        else:
+            cmd = format('cd /usr/hdp/{hdp_version}/ranger-hbase-plugin/ && sh disable-hbase-plugin.sh')
+
+        Execute(cmd, environment={'JAVA_HOME': params.java64_home}, logoutput=True)                    
+    else:
+        Logger.info('Ranger admin not installed')
+
+
+def write_properties_to_file(file_path, value):
+    for key in value:
+      modify_config(file_path, key, value[key])
+
+
+def modify_config(filepath, variable, setting):
+    var_found = False
+    already_set = False
+    V=str(variable)
+    S=str(setting)
+    # use quotes if setting has spaces #
+    if ' ' in S:
+        S = '%s' % S
+
+    for line in fileinput.input(filepath, inplace = 1):
+        # process lines that look like config settings #
+        if not line.lstrip(' ').startswith('#') and '=' in line:
+            _infile_var = str(line.split('=')[0].rstrip(' '))
+            _infile_set = str(line.split('=')[1].lstrip(' ').rstrip())
+            # only change the first matching occurrence #
+            if var_found == False and _infile_var.rstrip(' ') == V:
+                var_found = True
+                # don't change it if it is already set #
+                if _infile_set.lstrip(' ') == S:
+                    already_set = True
+                else:
+                    line = "%s=%s\n" % (V, S)
+
+        sys.stdout.write(line)
+
+    # Append the variable if it wasn't found #
+    if not var_found:
+        with open(filepath, "a") as f:
+            f.write("%s=%s\n" % (V, S))
+    elif already_set == True:
+        pass
+    else:
+        pass
+
+    return
+
+def ranger_hbase_properties(params):
+    ranger_hbase_properties = dict()
+
+    ranger_hbase_properties['POLICY_MGR_URL']           = params.config['configurations']['admin-properties']['policymgr_external_url']
+    ranger_hbase_properties['SQL_CONNECTOR_JAR']        = params.config['configurations']['admin-properties']['SQL_CONNECTOR_JAR']
+    ranger_hbase_properties['XAAUDIT.DB.FLAVOUR']       = params.config['configurations']['admin-properties']['DB_FLAVOR']
+    ranger_hbase_properties['XAAUDIT.DB.DATABASE_NAME'] = params.config['configurations']['admin-properties']['audit_db_name']
+    ranger_hbase_properties['XAAUDIT.DB.USER_NAME']     = params.config['configurations']['admin-properties']['audit_db_user']
+    ranger_hbase_properties['XAAUDIT.DB.PASSWORD']      = params.config['configurations']['admin-properties']['audit_db_password']
+    ranger_hbase_properties['XAAUDIT.DB.HOSTNAME']      = params.config['configurations']['admin-properties']['db_host']
+    ranger_hbase_properties['REPOSITORY_NAME']          = params.config['clusterName'] + '_hbase'
+
+    ranger_hbase_properties['XAAUDIT.DB.IS_ENABLED']   = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.DB.IS_ENABLED']
+
+    ranger_hbase_properties['XAAUDIT.HDFS.IS_ENABLED'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.IS_ENABLED']
+    ranger_hbase_properties['XAAUDIT.HDFS.DESTINATION_DIRECTORY'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.DESTINATION_DIRECTORY']
+    ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY']
+    ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY']
+    ranger_hbase_properties['XAAUDIT.HDFS.DESTINTATION_FILE'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.DESTINTATION_FILE']
+    ranger_hbase_properties['XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS']
+    ranger_hbase_properties['XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS']
+    ranger_hbase_properties['XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS']
+    ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_BUFFER_FILE'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.LOCAL_BUFFER_FILE']
+    ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS']
+    ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS']
+    ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT']
+    
+
+    ranger_hbase_properties['SSL_KEYSTORE_FILE_PATH'] = params.config['configurations']['ranger-hbase-plugin-properties']['SSL_KEYSTORE_FILE_PATH']
+    ranger_hbase_properties['SSL_KEYSTORE_PASSWORD'] = params.config['configurations']['ranger-hbase-plugin-properties']['SSL_KEYSTORE_PASSWORD']
+    ranger_hbase_properties['SSL_TRUSTSTORE_FILE_PATH'] = params.config['configurations']['ranger-hbase-plugin-properties']['SSL_TRUSTSTORE_FILE_PATH']
+    ranger_hbase_properties['SSL_TRUSTSTORE_PASSWORD'] = params.config['configurations']['ranger-hbase-plugin-properties']['SSL_TRUSTSTORE_PASSWORD']
+    
+    ranger_hbase_properties['UPDATE_XAPOLICIES_ON_GRANT_REVOKE'] = params.config['configurations']['ranger-hbase-plugin-properties']['UPDATE_XAPOLICIES_ON_GRANT_REVOKE']
+
+    return ranger_hbase_properties    
+
+def hbase_repo_properties(params):
+
+    config_dict = dict()
+    config_dict['username'] = params.config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+    config_dict['password'] = params.config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
+    config_dict['hadoop.security.authentication'] = params.config['configurations']['core-site']['hadoop.security.authentication']
+    config_dict['hbase.security.authentication'] = params.config['configurations']['hbase-site']['hbase.security.authentication']
+    config_dict['hbase.zookeeper.property.clientPort'] = params.config['configurations']['hbase-site']['hbase.zookeeper.property.clientPort']
+    config_dict['hbase.zookeeper.quorum'] = params.config['configurations']['hbase-site']['hbase.zookeeper.quorum']
+    config_dict['zookeeper.znode.parent'] =  params.config['configurations']['hbase-site']['zookeeper.znode.parent']
+
+    if params.config['configurations']['cluster-env']['security_enabled']:
+        config_dict['hbase.master.kerberos.principal'] = params.config['configurations']['hbase-site']['hbase.master.kerberos.principal']
+    else:
+        config_dict['hbase.master.kerberos.principal'] = ''
+
+    repo= dict()
+    repo['isActive']                = "true"
+    repo['config']                  = json.dumps(config_dict)
+    repo['description']             = "hbase repo"
+    repo['name']                    = params.config['clusterName'] + "_hbase"
+    repo['repositoryType']          = "Hbase"
+    repo['assetType']               = '2'
+
+    data = json.dumps(repo)
+
+    return data

+ 1 - 0
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml

@@ -228,6 +228,7 @@
         <config-type>hadoop-env</config-type>
         <config-type>hadoop-policy</config-type>
         <config-type>hdfs-log4j</config-type>
+        <config-type>ranger-hdfs-plugin-properties</config-type>
       </configuration-dependencies>
     </service>
   </services>

+ 2 - 1
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py

@@ -36,7 +36,7 @@ from hdfs_namenode import namenode
 from hdfs import hdfs
 import hdfs_rebalance
 from utils import failover_namenode
-
+from setup_ranger_hdfs import setup_ranger_hdfs
 
 class NameNode(Script):
 
@@ -70,6 +70,7 @@ class NameNode(Script):
 
     env.set_params(params)
     self.configure(env)
+    setup_ranger_hdfs(env)
     namenode(action="start", rolling_restart=rolling_restart, env=env)
 
   def post_rolling_restart(self, env):

+ 13 - 0
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py

@@ -295,3 +295,16 @@ ttnode_heapsize = "1024m"
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
 mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+    # setting flag value for ranger hdfs plugin
+    enable_ranger_hdfs = False
+    user_input = config['configurations']['ranger-hdfs-plugin-properties']['ranger-hdfs-plugin-enabled']
+    if  user_input.lower() == 'yes':
+      enable_ranger_hdfs = True
+    elif user_input.lower() == 'no':
+      enable_ranger_hdfs = False

+ 191 - 0
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py

@@ -0,0 +1,191 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+import fileinput
+import subprocess
+import json
+import re
+from resource_management import *
+from resource_management.libraries.functions.ranger_functions import Rangeradmin
+from resource_management.core.logger import Logger
+
+def setup_ranger_hdfs(env):
+    import params
+    env.set_params(params)
+
+    if params.has_ranger_admin:
+        try:
+            command = 'hdp-select status hadoop-client'
+            return_code, hdp_output = shell.call(command, timeout=20)
+        except Exception, e:
+            Logger.error(str(e))
+            raise Fail('Unable to execute hdp-select command to retrieve the version.')
+
+        if return_code != 0:
+            raise Fail('Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))
+
+        hdp_version = re.sub('hadoop-client - ', '', hdp_output)
+        match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', hdp_version)
+
+        if match is None:
+            raise Fail('Failed to get extracted version')
+
+        file_path = '/usr/hdp/'+ hdp_version +'/ranger-hdfs-plugin/install.properties'
+
+        ranger_hdfs_dict = ranger_hdfs_properties(params)
+        hdfs_repo_data = hdfs_repo_properties(params)        
+
+        write_properties_to_file(file_path, ranger_hdfs_dict)
+
+        if params.enable_ranger_hdfs:            
+            cmd = format('cd /usr/hdp/{hdp_version}/ranger-hdfs-plugin/ && sh enable-hdfs-plugin.sh')
+            ranger_adm_obj = Rangeradmin(url=ranger_hdfs_dict['POLICY_MGR_URL'])
+            response_code, response_recieved = ranger_adm_obj.check_ranger_login_urllib2(ranger_hdfs_dict['POLICY_MGR_URL'] + '/login.jsp', 'test:test')
+
+            if response_code is not None and response_code == 200:
+                repo = ranger_adm_obj.get_repository_by_name_urllib2(ranger_hdfs_dict['REPOSITORY_NAME'], 'hdfs', 'true', 'admin:admin')
+
+                if repo and repo['name'] == ranger_hdfs_dict['REPOSITORY_NAME']:
+                    Logger.info('HDFS Repository exist')
+                else:
+                    response = ranger_adm_obj.create_repository_urllib2(hdfs_repo_data, 'admin:admin')
+                    if response is not None:
+                        Logger.info('HDFS Repository created in Ranger Admin')
+                    else:
+                        Logger.info('HDFS Repository creation failed in Ranger Admin')
+            else:
+                Logger.info('Ranger service is not started on given host')
+        else:
+            cmd = format('cd /usr/hdp/{hdp_version}/ranger-hdfs-plugin/ && sh disable-hdfs-plugin.sh')
+
+        Execute(cmd, environment={'JAVA_HOME': params.java_home}, logoutput=True)            
+    else:
+        Logger.info('Ranger admin not installed')
+
+
+def write_properties_to_file(file_path, value):
+    for key in value:
+      modify_config(file_path, key, value[key])
+
+
+def modify_config(filepath, variable, setting):
+    var_found = False
+    already_set = False
+    V=str(variable)
+    S=str(setting)
+    # use quotes if setting has spaces #
+    if ' ' in S:
+        S = '%s' % S
+
+    for line in fileinput.input(filepath, inplace = 1):
+        # process lines that look like config settings #
+        if not line.lstrip(' ').startswith('#') and '=' in line:
+            _infile_var = str(line.split('=')[0].rstrip(' '))
+            _infile_set = str(line.split('=')[1].lstrip(' ').rstrip())
+            # only change the first matching occurrence #
+            if var_found == False and _infile_var.rstrip(' ') == V:
+                var_found = True
+                # don't change it if it is already set #
+                if _infile_set.lstrip(' ') == S:
+                    already_set = True
+                else:
+                    line = "%s=%s\n" % (V, S)
+
+        sys.stdout.write(line)
+
+    # Append the variable if it wasn't found #
+    if not var_found:
+        with open(filepath, "a") as f:
+            f.write("%s=%s\n" % (V, S))
+    elif already_set == True:
+        pass
+    else:
+        pass
+
+    return
+
+def ranger_hdfs_properties(params):
+    ranger_hdfs_properties = dict()
+
+    ranger_hdfs_properties['POLICY_MGR_URL']           = params.config['configurations']['admin-properties']['policymgr_external_url']
+    ranger_hdfs_properties['SQL_CONNECTOR_JAR']        = params.config['configurations']['admin-properties']['SQL_CONNECTOR_JAR']
+    ranger_hdfs_properties['XAAUDIT.DB.FLAVOUR']       = params.config['configurations']['admin-properties']['DB_FLAVOR']
+    ranger_hdfs_properties['XAAUDIT.DB.DATABASE_NAME'] = params.config['configurations']['admin-properties']['audit_db_name']
+    ranger_hdfs_properties['XAAUDIT.DB.USER_NAME']     = params.config['configurations']['admin-properties']['audit_db_user']
+    ranger_hdfs_properties['XAAUDIT.DB.PASSWORD']      = params.config['configurations']['admin-properties']['audit_db_password']
+    ranger_hdfs_properties['XAAUDIT.DB.HOSTNAME']      = params.config['configurations']['admin-properties']['db_host']
+    ranger_hdfs_properties['REPOSITORY_NAME']          = params.config['clusterName'] + '_hadoop'
+
+    ranger_hdfs_properties['XAAUDIT.DB.IS_ENABLED']   = params.config['configurations']['ranger-hdfs-plugin-properties']['XAAUDIT.DB.IS_ENABLED']
+
+    ranger_hdfs_properties['XAAUDIT.HDFS.IS_ENABLED'] = params.config['configurations']['ranger-hdfs-plugin-properties']['XAAUDIT.HDFS.IS_ENABLED']
+    ranger_hdfs_properties['XAAUDIT.HDFS.DESTINATION_DIRECTORY'] = params.config['configurations']['ranger-hdfs-plugin-properties']['XAAUDIT.HDFS.DESTINATION_DIRECTORY']
+    ranger_hdfs_properties['XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY'] = params.config['configurations']['ranger-hdfs-plugin-properties']['XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY']
+    ranger_hdfs_properties['XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY'] = params.config['configurations']['ranger-hdfs-plugin-properties']['XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY']
+    ranger_hdfs_properties['XAAUDIT.HDFS.DESTINTATION_FILE'] = params.config['configurations']['ranger-hdfs-plugin-properties']['XAAUDIT.HDFS.DESTINTATION_FILE']
+    ranger_hdfs_properties['XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hdfs-plugin-properties']['XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS']
+    ranger_hdfs_properties['XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hdfs-plugin-properties']['XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS']
+    ranger_hdfs_properties['XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hdfs-plugin-properties']['XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS']
+    ranger_hdfs_properties['XAAUDIT.HDFS.LOCAL_BUFFER_FILE'] = params.config['configurations']['ranger-hdfs-plugin-properties']['XAAUDIT.HDFS.LOCAL_BUFFER_FILE']
+    ranger_hdfs_properties['XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hdfs-plugin-properties']['XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS']
+    ranger_hdfs_properties['XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hdfs-plugin-properties']['XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS']
+    ranger_hdfs_properties['XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT'] = params.config['configurations']['ranger-hdfs-plugin-properties']['XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT']
+    
+
+    ranger_hdfs_properties['SSL_KEYSTORE_FILE_PATH'] = params.config['configurations']['ranger-hdfs-plugin-properties']['SSL_KEYSTORE_FILE_PATH']
+    ranger_hdfs_properties['SSL_KEYSTORE_PASSWORD'] = params.config['configurations']['ranger-hdfs-plugin-properties']['SSL_KEYSTORE_PASSWORD']
+    ranger_hdfs_properties['SSL_TRUSTSTORE_FILE_PATH'] = params.config['configurations']['ranger-hdfs-plugin-properties']['SSL_TRUSTSTORE_FILE_PATH']
+    ranger_hdfs_properties['SSL_TRUSTSTORE_PASSWORD'] = params.config['configurations']['ranger-hdfs-plugin-properties']['SSL_TRUSTSTORE_PASSWORD']
+
+    return ranger_hdfs_properties
+
+
+def hdfs_repo_properties(params):
+
+    config_dict = dict()
+    config_dict['username'] = params.config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+    config_dict['password'] = params.config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
+    config_dict['hadoop.security.authentication'] = params.config['configurations']['core-site']['hadoop.security.authentication']
+    config_dict['hadoop.security.authorization'] = params.config['configurations']['core-site']['hadoop.security.authorization']
+    config_dict['fs.default.name'] = params.config['configurations']['core-site']['fs.defaultFS']
+    config_dict['hadoop.security.auth_to_local'] = params.config['configurations']['core-site']['hadoop.security.auth_to_local']
+    config_dict['hadoop.rpc.protection'] = params.config['configurations']['ranger-hdfs-plugin-properties']['hadoop.rpc.protection']
+    config_dict['commonNameForCertificate'] = params.config['configurations']['ranger-hdfs-plugin-properties']['common.name.for.certificate']
+
+    if params.config['configurations']['cluster-env']['security_enabled']:
+        config_dict['dfs.datanode.kerberos.principal'] = params.config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
+        config_dict['dfs.namenode.kerberos.principal'] = params.config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
+        config_dict['dfs.secondary.namenode.kerberos.principal'] = params.config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
+    else:
+        config_dict['dfs.datanode.kerberos.principal'] = ''
+        config_dict['dfs.namenode.kerberos.principal'] = ''
+        config_dict['dfs.secondary.namenode.kerberos.principal'] = ''
+
+    repo= dict()
+    repo['isActive']                = "true"
+    repo['config']                  = json.dumps(config_dict)
+    repo['description']             = "hdfs repo"
+    repo['name']                    = params.config['clusterName'] + "_hadoop"
+    repo['repositoryType']          = "Hdfs"
+    repo['assetType']               = '1'
+
+    data = json.dumps(repo)
+
+    return data

+ 1 - 0
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml

@@ -288,6 +288,7 @@
         <config-type>hive-env</config-type>
         <config-type>webhcat-site</config-type>
         <config-type>webhcat-env</config-type>
+        <config-type>ranger-hive-plugin-properties</config-type>
       </configuration-dependencies>
     </service>
   </services>

+ 2 - 1
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py

@@ -27,6 +27,7 @@ from resource_management.libraries.functions.security_commons import build_expec
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_XML
 from install_jars import install_tez_jars
+from setup_ranger_hive import setup_ranger_hive
 
 class HiveServer(Script):
 
@@ -55,7 +56,7 @@ class HiveServer(Script):
     # This function is needed in HDP 2.2, but it is safe to call in earlier versions.
     copy_tarballs_to_hdfs('mapreduce', 'hive-server2', params.tez_user, params.hdfs_user, params.user_group)
     copy_tarballs_to_hdfs('tez', 'hive-server2', params.tez_user, params.hdfs_user, params.user_group)
-
+    setup_ranger_hive(env)    
     hive_service( 'hiveserver2', action = 'start',
       rolling_restart=rolling_restart )
 

+ 12 - 0
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py

@@ -319,3 +319,15 @@ HdfsDirectory = functools.partial(
   kinit_path_local = kinit_path_local,
   bin_dir = hadoop_bin_dir
 )
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >=0:
+    # setting flag value for ranger hive plugin
+    enable_ranger_hive = False
+    user_input = config['configurations']['ranger-hive-plugin-properties']['ranger-hive-plugin-enabled']
+    if  user_input.lower() == 'yes':
+      enable_ranger_hive = True
+    elif user_input.lower() == 'no':
+      enable_ranger_hive = False

+ 182 - 0
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py

@@ -0,0 +1,182 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+import fileinput
+import subprocess
+import json
+import re
+from resource_management import *
+from resource_management.libraries.functions.ranger_functions import Rangeradmin
+from resource_management.core.logger import Logger
+
+def setup_ranger_hive(env):
+    import params
+    env.set_params(params)
+
+    if params.has_ranger_admin:
+        try:
+            command = 'hdp-select status hive-server2'
+            return_code, hdp_output = shell.call(command, timeout=20)
+        except Exception, e:
+            Logger.error(str(e))
+            raise Fail('Unable to execute hdp-select command to retrieve the version.')
+
+        if return_code != 0:
+            raise Fail('Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))
+
+        hdp_version = re.sub('hive-server2 - ', '', hdp_output)
+        match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', hdp_version)
+
+        if match is None:
+            raise Fail('Failed to get extracted version')
+
+        file_path = '/usr/hdp/'+ hdp_version +'/ranger-hive-plugin/install.properties'
+
+        ranger_hive_dict = ranger_hive_properties(params)
+        hive_repo_data = hive_repo_properties(params)
+
+        write_properties_to_file(file_path, ranger_hive_dict)
+    
+        if params.enable_ranger_hive:
+            cmd = format('cd /usr/hdp/{hdp_version}/ranger-hive-plugin/ && sh enable-hive-plugin.sh')
+            ranger_adm_obj = Rangeradmin(url=ranger_hive_dict['POLICY_MGR_URL'])
+            response_code, response_recieved = ranger_adm_obj.check_ranger_login_urllib2(ranger_hive_dict['POLICY_MGR_URL'] + '/login.jsp', 'test:test')
+
+            if response_code is not None and response_code == 200:
+                repo = ranger_adm_obj.get_repository_by_name_urllib2(ranger_hive_dict['REPOSITORY_NAME'], 'hive', 'true', 'admin:admin')
+
+                if repo and repo['name'] ==  ranger_hive_dict['REPOSITORY_NAME']:
+                    Logger.info('Hive Repository exist')
+                else:
+                    response = ranger_adm_obj.create_repository_urllib2(hive_repo_data, 'admin:admin')
+                    if response is not None:
+                        Logger.info('Hive Repository created in Ranger Admin')
+                    else:
+                        Logger.info('Hive Repository creation failed in Ranger Admin')
+            else:                        
+                Logger.info('Ranger service is not started on given host')
+        else:
+            cmd = format('cd /usr/hdp/{hdp_version}/ranger-hive-plugin/ && sh disable-hive-plugin.sh')
+        
+        Execute(cmd, environment={'JAVA_HOME': params.java64_home}, logoutput=True)
+    else:
+        Logger.info('Ranger admin not installed')
+
+
+def write_properties_to_file(file_path, value):
+    for key in value:
+      modify_config(file_path, key, value[key])
+
+
+def modify_config(filepath, variable, setting):
+    var_found = False
+    already_set = False
+    V=str(variable)
+    S=str(setting)
+    # use quotes if setting has spaces #
+    if ' ' in S:
+        S = '%s' % S
+
+    for line in fileinput.input(filepath, inplace = 1):
+        # process lines that look like config settings #
+        if not line.lstrip(' ').startswith('#') and '=' in line:
+            _infile_var = str(line.split('=')[0].rstrip(' '))
+            _infile_set = str(line.split('=')[1].lstrip(' ').rstrip())
+            # only change the first matching occurrence #
+            if var_found == False and _infile_var.rstrip(' ') == V:
+                var_found = True
+                # don't change it if it is already set #
+                if _infile_set.lstrip(' ') == S:
+                    already_set = True
+                else:
+                    line = "%s=%s\n" % (V, S)
+
+        sys.stdout.write(line)
+
+    # Append the variable if it wasn't found #
+    if not var_found:
+        with open(filepath, "a") as f:
+            f.write("%s=%s\n" % (V, S))
+    elif already_set == True:
+        pass
+    else:
+        pass
+
+    return
+
+def ranger_hive_properties(params):
+    ranger_hive_properties = dict()
+
+    ranger_hive_properties['POLICY_MGR_URL']           = params.config['configurations']['admin-properties']['policymgr_external_url']
+    ranger_hive_properties['SQL_CONNECTOR_JAR']        = params.config['configurations']['admin-properties']['SQL_CONNECTOR_JAR']
+    ranger_hive_properties['XAAUDIT.DB.FLAVOUR']       = params.config['configurations']['admin-properties']['DB_FLAVOR']
+    ranger_hive_properties['XAAUDIT.DB.DATABASE_NAME'] = params.config['configurations']['admin-properties']['audit_db_name']
+    ranger_hive_properties['XAAUDIT.DB.USER_NAME']     = params.config['configurations']['admin-properties']['audit_db_user']
+    ranger_hive_properties['XAAUDIT.DB.PASSWORD']      = params.config['configurations']['admin-properties']['audit_db_password']
+    ranger_hive_properties['XAAUDIT.DB.HOSTNAME']      = params.config['configurations']['admin-properties']['db_host']
+    ranger_hive_properties['REPOSITORY_NAME']          = params.config['clusterName'] + '_hive'
+
+    ranger_hive_properties['XAAUDIT.DB.IS_ENABLED']   = params.config['configurations']['ranger-hive-plugin-properties']['XAAUDIT.DB.IS_ENABLED']
+
+    ranger_hive_properties['XAAUDIT.HDFS.IS_ENABLED'] = params.config['configurations']['ranger-hive-plugin-properties']['XAAUDIT.HDFS.IS_ENABLED']
+    ranger_hive_properties['XAAUDIT.HDFS.DESTINATION_DIRECTORY'] = params.config['configurations']['ranger-hive-plugin-properties']['XAAUDIT.HDFS.DESTINATION_DIRECTORY']
+    ranger_hive_properties['XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY'] = params.config['configurations']['ranger-hive-plugin-properties']['XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY']
+    ranger_hive_properties['XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY'] = params.config['configurations']['ranger-hive-plugin-properties']['XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY']
+    ranger_hive_properties['XAAUDIT.HDFS.DESTINTATION_FILE'] = params.config['configurations']['ranger-hive-plugin-properties']['XAAUDIT.HDFS.DESTINTATION_FILE']
+    ranger_hive_properties['XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hive-plugin-properties']['XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS']
+    ranger_hive_properties['XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hive-plugin-properties']['XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS']
+    ranger_hive_properties['XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hive-plugin-properties']['XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS']
+    ranger_hive_properties['XAAUDIT.HDFS.LOCAL_BUFFER_FILE'] = params.config['configurations']['ranger-hive-plugin-properties']['XAAUDIT.HDFS.LOCAL_BUFFER_FILE']
+    ranger_hive_properties['XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hive-plugin-properties']['XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS']
+    ranger_hive_properties['XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hive-plugin-properties']['XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS']
+    ranger_hive_properties['XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT'] = params.config['configurations']['ranger-hive-plugin-properties']['XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT']
+    
+
+    ranger_hive_properties['SSL_KEYSTORE_FILE_PATH'] = params.config['configurations']['ranger-hive-plugin-properties']['SSL_KEYSTORE_FILE_PATH']
+    ranger_hive_properties['SSL_KEYSTORE_PASSWORD'] = params.config['configurations']['ranger-hive-plugin-properties']['SSL_KEYSTORE_PASSWORD']
+    ranger_hive_properties['SSL_TRUSTSTORE_FILE_PATH'] = params.config['configurations']['ranger-hive-plugin-properties']['SSL_TRUSTSTORE_FILE_PATH']
+    ranger_hive_properties['SSL_TRUSTSTORE_PASSWORD'] = params.config['configurations']['ranger-hive-plugin-properties']['SSL_TRUSTSTORE_PASSWORD']
+
+    ranger_hive_properties['UPDATE_XAPOLICIES_ON_GRANT_REVOKE'] = params.config['configurations']['ranger-hive-plugin-properties']['UPDATE_XAPOLICIES_ON_GRANT_REVOKE']
+
+    return ranger_hive_properties
+
+def hive_repo_properties(params):
+
+    hive_host = params.config['clusterHostInfo']['hive_server_host'][0]
+
+    config_dict = dict()
+    config_dict['username'] = params.config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+    config_dict['password'] = params.config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
+    config_dict['jdbc.driverClassName'] = params.config['configurations']['ranger-hive-plugin-properties']['jdbc.driverClassName']
+    config_dict['jdbc.url'] = 'jdbc:hive2://' + hive_host + ':10000'
+    config_dict['commonNameForCertificate'] = params.config['configurations']['ranger-hive-plugin-properties']['common.name.for.certificate']
+
+    repo= dict()
+    repo['isActive']                = "true"
+    repo['config']                  = json.dumps(config_dict)
+    repo['description']             = "hive repo"
+    repo['name']                    = params.config['clusterName'] + '_hive'
+    repo['repositoryType']          = "Hive"
+    repo['assetType']               = '3'
+
+    data = json.dumps(repo)
+
+    return data

+ 156 - 0
ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml

@@ -0,0 +1,156 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+	<property>
+		<name>common.name.for.certificate</name>
+		<value>-</value>
+		<description>Used for repository creation on ranger admin</description>
+	</property>
+
+    <property>
+        <name>ranger-knox-plugin-enabled</name>
+        <value>No</value>
+        <description>Enable ranger knox plugin?</description>
+    </property>
+
+	<property>
+		<name>REPOSITORY_CONFIG_USERNAME</name>
+		<value>admin</value>
+		<description>Used for repository creation on ranger admin</description>
+	</property>	
+
+	<property>
+		<name>REPOSITORY_CONFIG_PASSWORD</name>
+		<value>admin-password</value>
+		<property-type>PASSWORD</property-type>
+		<description>Used for repository creation on ranger admin</description>
+	</property>	
+
+	<property>
+		<name>KNOX_HOME</name>
+		<value>/usr/hdp/current/knox-server</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.DB.IS_ENABLED</name>
+		<value>true</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.IS_ENABLED</name>
+		<value>false</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINATION_DIRECTORY</name>
+		<value>hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FILE</name>
+		<value>%hostname%-audit.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS</name>
+		<value>900</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>86400</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FILE</name>
+		<value>%time:yyyyMMdd-HHmm.ss%.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>600</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT</name>
+		<value>10</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-keystore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_PASSWORD</name>
+		<value>myKeyFilePassword</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-truststore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_PASSWORD</name>
+		<value>changeit</value>
+		<description></description>
+	</property>
+
+</configuration>

+ 1 - 0
ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/metainfo.xml

@@ -81,6 +81,7 @@
         <config-type>gateway-site</config-type>
         <config-type>gateway-log4j</config-type>
         <config-type>topology</config-type>
+        <config-type>ranger-knox-plugin-properties</config-type>
       </configuration-dependencies>
     </service>
   </services>

+ 2 - 0
ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py

@@ -26,6 +26,7 @@ import upgrade
 
 from knox import knox
 from ldap import ldap
+from setup_ranger_knox import setup_ranger_knox
 
 class KnoxGateway(Script):
 
@@ -62,6 +63,7 @@ class KnoxGateway(Script):
     self.configure(env)
     daemon_cmd = format('{knox_bin} start')
     no_op_test = format('ls {knox_pid_file} >/dev/null 2>&1 && ps -p `cat {knox_pid_file}` >/dev/null 2>&1')
+    setup_ranger_knox(env)
     Execute(daemon_cmd,
             user=params.knox_user,
             environment={'JAVA_HOME': params.java_home},

+ 12 - 0
ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params.py

@@ -145,4 +145,16 @@ if security_enabled:
   _hostname_lowercase = config['hostname'].lower()
   knox_principal_name = config['configurations']['knox-env']['knox_principal_name'].replace('_HOST',_hostname_lowercase)
 
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
 
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+    # Setting flag value for ranger knox plugin
+    enable_ranger_knox = False
+    user_input = config['configurations']['ranger-knox-plugin-properties']['ranger-knox-plugin-enabled']
+    if user_input.lower() == 'yes':
+      enable_ranger_knox = True
+    elif user_input.lower() == 'no':
+      enable_ranger_knox = False
+      

+ 183 - 0
ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py

@@ -0,0 +1,183 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+import fileinput
+import subprocess
+import json
+import re
+from resource_management import *
+from resource_management.libraries.functions.ranger_functions import Rangeradmin
+from resource_management.core.logger import Logger
+
def setup_ranger_knox(env):
    """Configure and enable/disable the Ranger Knox plugin.

    Writes the plugin's install.properties for the current HDP version,
    then runs the plugin's enable or disable script depending on
    ``params.enable_ranger_knox``.  When enabling, it also ensures a Knox
    repository exists in Ranger Admin (creating it if necessary).
    Logs and returns without action when no Ranger Admin host is present.

    Raises ``Fail`` when the HDP version cannot be determined.
    """
    import params
    env.set_params(params)

    if params.has_ranger_admin:
        # Resolve the concrete HDP version so we can locate the versioned
        # ranger-knox-plugin directory under /usr/hdp.
        try:
            command = 'hdp-select status knox-server'
            return_code, hdp_output = shell.call(command, timeout=20)
        except Exception as e:
            Logger.error(str(e))
            raise Fail('Unable to execute hdp-select command to retrieve the version.')

        if return_code != 0:
            raise Fail('Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))

        # strip() removes the trailing newline from the shell output; without
        # it the version string -- and every path built from it below --
        # would contain an embedded '\n'.
        hdp_version = re.sub('knox-server - ', '', hdp_output).strip()
        match = re.match('[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+-[0-9]+', hdp_version)

        if match is None:
            raise Fail('Failed to get extracted version')

        file_path = '/usr/hdp/' + hdp_version + '/ranger-knox-plugin/install.properties'

        ranger_knox_dict = ranger_knox_properties(params)
        knox_repo_data = knox_repo_properties(params)

        write_properties_to_file(file_path, ranger_knox_dict)

        if params.enable_ranger_knox:
            cmd = format('cd /usr/hdp/{hdp_version}/ranger-knox-plugin/ && sh enable-knox-plugin.sh')
            # Only attempt repository creation when Ranger Admin answers the
            # login page with HTTP 200.
            ranger_adm_obj = Rangeradmin(url=ranger_knox_dict['POLICY_MGR_URL'])
            response_code, response_recieved = ranger_adm_obj.check_ranger_login_urllib2(ranger_knox_dict['POLICY_MGR_URL'] + '/login.jsp', 'test:test')

            if response_code is not None and response_code == 200:
                repo = ranger_adm_obj.get_repository_by_name_urllib2(ranger_knox_dict['REPOSITORY_NAME'], 'knox', 'true', 'admin:admin')

                if repo and repo['name'] == ranger_knox_dict['REPOSITORY_NAME']:
                    Logger.info('Knox Repository exist')
                else:
                    response = ranger_adm_obj.create_repository_urllib2(knox_repo_data, 'admin:admin')
                    if response is not None:
                        Logger.info('Knox Repository created in Ranger Admin')
                    else:
                        Logger.info('Knox Repository creation failed in Ranger Admin')
            else:
                Logger.info('Ranger service is not started on given host')
        else:
            cmd = format('cd /usr/hdp/{hdp_version}/ranger-knox-plugin/ && sh disable-knox-plugin.sh')

        Execute(cmd, environment={'JAVA_HOME': params.java_home}, logoutput=True)
    else:
        Logger.info('Ranger admin not installed')
+
+
def write_properties_to_file(file_path, value):
    """Apply every key/value pair in *value* to the properties file at *file_path*."""
    for prop_name, prop_value in value.items():
        modify_config(file_path, prop_name, prop_value)
+
+
def modify_config(filepath, variable, setting):
    """Set ``variable=setting`` in a properties-style file, in place.

    Only the first non-comment ``variable=...`` line is rewritten; if the
    variable does not occur at all, the assignment is appended to the end
    of the file.  A line whose value already equals *setting* is left
    untouched.  Returns None.

    Note: values are written verbatim -- no quoting is applied, even when
    *setting* contains spaces (the original quoting branch was a no-op and
    has been removed).
    """
    var_found = False
    name = str(variable)
    value = str(setting)

    # fileinput with inplace=1 redirects sys.stdout into a replacement
    # copy of the file, so every line must be written back out.
    for line in fileinput.input(filepath, inplace=1):
        # Candidate lines look like 'key=value' and are not comments.
        if not var_found and not line.lstrip(' ').startswith('#') and '=' in line:
            in_file_key = line.split('=')[0].rstrip(' ')
            in_file_value = line.split('=')[1].lstrip(' ').rstrip()
            if in_file_key == name:
                var_found = True
                # Rewrite only when the value actually differs.
                if in_file_value.lstrip(' ') != value:
                    line = "%s=%s\n" % (name, value)

        sys.stdout.write(line)

    # Append the assignment if no existing line defined the variable.
    if not var_found:
        with open(filepath, "a") as f:
            f.write("%s=%s\n" % (name, value))

    return
+
def ranger_knox_properties(params):
    """Assemble the install.properties key/value map for the Ranger Knox plugin.

    Pulls connection/audit settings from admin-properties and the
    plugin-specific settings from ranger-knox-plugin-properties; the
    repository name is derived from the cluster name.
    """
    admin_conf = params.config['configurations']['admin-properties']
    plugin_conf = params.config['configurations']['ranger-knox-plugin-properties']

    props = dict()

    # Ranger Admin connection / audit DB settings come from admin-properties
    # under different key names, so map them explicitly.
    props['POLICY_MGR_URL']           = admin_conf['policymgr_external_url']
    props['SQL_CONNECTOR_JAR']        = admin_conf['SQL_CONNECTOR_JAR']
    props['XAAUDIT.DB.FLAVOUR']       = admin_conf['DB_FLAVOR']
    props['XAAUDIT.DB.DATABASE_NAME'] = admin_conf['audit_db_name']
    props['XAAUDIT.DB.USER_NAME']     = admin_conf['audit_db_user']
    props['XAAUDIT.DB.PASSWORD']      = admin_conf['audit_db_password']
    props['XAAUDIT.DB.HOSTNAME']      = admin_conf['db_host']
    props['REPOSITORY_NAME']          = params.config['clusterName'] + '_knox'

    # Plugin-side keys share their names with the config entries
    # (note: 'DESTINTATION' misspellings are the actual property names),
    # so copy them verbatim.
    for key in ('KNOX_HOME',
                'XAAUDIT.DB.IS_ENABLED',
                'XAAUDIT.HDFS.IS_ENABLED',
                'XAAUDIT.HDFS.DESTINATION_DIRECTORY',
                'XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY',
                'XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY',
                'XAAUDIT.HDFS.DESTINTATION_FILE',
                'XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS',
                'XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS',
                'XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS',
                'XAAUDIT.HDFS.LOCAL_BUFFER_FILE',
                'XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS',
                'XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS',
                'XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT',
                'SSL_KEYSTORE_FILE_PATH',
                'SSL_KEYSTORE_PASSWORD',
                'SSL_TRUSTSTORE_FILE_PATH',
                'SSL_TRUSTSTORE_PASSWORD'):
        props[key] = plugin_conf[key]

    return props
+
def knox_repo_properties(params):
    """Build the JSON payload used to create the Knox repository in Ranger Admin.

    Returns a JSON string describing the repository; its 'config' field is
    itself a JSON-encoded string (Ranger's repository API expects that
    double encoding).
    """
    knoxHost = params.config['clusterHostInfo']['knox_gateway_hosts'][0]
    knoxPort = params.config['configurations']['gateway-site']['gateway.port']

    plugin_conf = params.config['configurations']['ranger-knox-plugin-properties']

    config_dict = dict()
    config_dict['username'] = plugin_conf['REPOSITORY_CONFIG_USERNAME']
    # BUG FIX: previously this copied REPOSITORY_CONFIG_USERNAME into the
    # password field, so the repository was created with a wrong password.
    config_dict['password'] = plugin_conf['REPOSITORY_CONFIG_PASSWORD']
    config_dict['knox.url'] = 'https://' + knoxHost + ':' + str(knoxPort) + '/gateway/admin/api/v1/topologies'
    config_dict['commonNameForCertificate'] = plugin_conf['common.name.for.certificate']

    repo = dict()
    repo['isActive']       = "true"
    repo['config']         = json.dumps(config_dict)
    repo['description']    = "knox repo"
    repo['name']           = params.config['clusterName'] + "_knox"
    repo['repositoryType'] = "Knox"
    repo['assetType']      = '5'

    return json.dumps(repo)

+ 180 - 0
ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml

@@ -0,0 +1,180 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false">
+
+	<property>
+		<name>DB_FLAVOR</name>
+		<value>MYSQL</value>
+		<description>The database type to be used (mysql/oracle)</description>
+	</property>
+
+        <property>
+                <name>SQL_COMMAND_INVOKER</name>
+                <value>mysql</value>
+                <description>The executable path to be used to invoke command-line MYSQL</description>
+        </property>
+
+        <property>
+                <name>SQL_CONNECTOR_JAR</name>
+                <value>/usr/share/java/mysql-connector-java.jar</value>
+                <description>Location of DB client library (please check the location of the jar file)</description>
+        </property>
+
+        <property>
+                <name>db_root_user</name>
+                <value>root</value>
+                <property-type>USER</property-type>
+                <description>Database admin user</description>
+        </property>
+
+        <property require-input="true">
+                <name>db_root_password</name>
+                <value>vagrant</value>
+                <property-type>PASSWORD</property-type>
+                <description>Database password for the database admin user-id</description>
+        </property>
+
+        <property>
+                <name>db_host</name>
+                <value>localhost</value>
+                <description>Database host</description>
+        </property>
+
+        <property>
+                <name>db_name</name>
+                <value>ranger</value>
+                <description>Database name</description>
+        </property>
+
+        <property>
+                <name>db_user</name>
+                <value>rangeradmin</value>
+                <property-type>USER</property-type>
+                <description>Database user-id used for the XASecure schema</description>
+        </property>
+
+        <property require-input="true">
+                <name>db_password</name>
+                <value>rangeradmin</value>
+                <property-type>PASSWORD</property-type>
+                <description>Database password for the XASecure schema</description>
+        </property>
+
+        <property>
+                <name>audit_db_name</name>
+                <value>ranger_audit</value>
+                <description>Audit database name</description>
+        </property>
+
+        <property>
+                <name>audit_db_user</name>
+                <value>rangerlogger</value>
+                <property-type>USER</property-type>
+                <description>Database user-id for storing auditlog information</description>
+        </property>
+
+        <property require-input="true">
+                <name>audit_db_password</name>
+                <value>rangerlogger</value>
+                <property-type>PASSWORD</property-type>
+                <description>Database password for storing auditlog information</description>
+        </property>
+
+        <property>
+                <name>policymgr_external_url</name>
+                <value>http://localhost:6080</value>
+                <description>Policy Manager external url</description>
+        </property>
+
+        <property>
+                <name>policymgr_http_enabled</name>
+                <value>true</value>
+                <description>HTTP Enabled</description>
+        </property>
+
+        <property>
+                <name>authentication_method</name>
+                <value>UNIX</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>remoteLoginEnabled</name>
+                <value>true</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>authServiceHostName</name>
+                <value>localhost</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>authServicePort</name>
+                <value>5151</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>xa_ldap_url</name>
+                <value>"ldap://71.127.43.33:389"</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>xa_ldap_userDNpattern</name>
+                <value>"uid={0},ou=users,dc=xasecure,dc=net"</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>xa_ldap_groupSearchBase</name>
+                <value>"ou=groups,dc=xasecure,dc=net"</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>xa_ldap_groupSearchFilter</name>
+                <value>"(member=uid={0},ou=users,dc=xasecure,dc=net)"</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>xa_ldap_groupRoleAttribute</name>
+                <value>"cn"</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>xa_ldap_ad_domain</name>
+                <value>"xasecure.net"</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>xa_ldap_ad_url</name>
+                <value>"ldap://ad.xasecure.net:389"</value>
+                <description></description>
+        </property>
+
+</configuration>

+ 49 - 0
ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/ranger-env.xml

@@ -0,0 +1,49 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+    <property>
+        <name>ranger_user</name>
+        <value>ranger</value>
+        <property-type>USER</property-type>
+        <description>Ranger username</description>
+    </property>
+
+    <property>
+        <name>ranger_group</name>
+        <value>ranger</value>
+        <property-type>GROUP</property-type>
+        <description>Ranger group</description>
+    </property>
+
+    <property>
+        <name>ranger_admin_log_dir</name>
+        <value>/var/log/ranger/admin</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>ranger_usersync_log_dir</name>
+        <value>/var/log/ranger/usersync</value>
+        <description></description>
+    </property>    
+
+</configuration>

+ 103 - 0
ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/usersync-properties.xml

@@ -0,0 +1,103 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false">
+	<property>
+		<name>SYNC_SOURCE</name>
+		<value>unix</value>
+		<description></description>
+	</property>
+	<property>
+		<name>MIN_UNIX_USER_ID_TO_SYNC</name>
+		<value>1000</value>
+		<description></description>
+	</property>
+	<property>
+		<name>SYNC_INTERVAL</name>
+		<value>1</value>
+		<description></description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_URL</name>
+		<value>ldap://localhost:389</value>
+		<description>a sample value would be:  ldap://ldap.example.com:389</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_BIND_DN</name>
+		<value>cn=admin,dc=xasecure,dc=net</value>
+		<description>a sample value would be cn=admin,ou=users,dc=hadoop,dc=apache,dc=org</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_BIND_PASSWORD</name>
+		<value>admin321</value>
+		<description></description>
+	</property>
+	<property>
+		<name>CRED_KEYSTORE_FILENAME</name>
+		<value>/usr/lib/xausersync/.jceks/xausersync.jceks</value>
+		<description></description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_SEARCH_BASE</name>
+		<value>ou=users,dc=xasecure,dc=net</value>
+		<description>sample value would be ou=users,dc=hadoop,dc=apache,dc=org</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_SEARCH_SCOPE</name>
+		<value>sub</value>
+		<description>default value: sub</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_OBJECT_CLASS</name>
+		<value>person</value>
+		<description>default value: person</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_SEARCH_FILTER</name>
+		<value>-</value>
+		<description>default value is empty</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_NAME_ATTRIBUTE</name>
+		<value>cn</value>
+		<description>default value: cn</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE</name>
+		<value>memberof,ismemberof</value>
+		<description></description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USERNAME_CASE_CONVERSION</name>
+		<value>lower</value>
+		<description>possible values:  none, lower, upper</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_GROUPNAME_CASE_CONVERSION</name>
+		<value>lower</value>
+		<description>possible values:  none, lower, upper</description>
+	</property>
+	<property>
+		<name>logdir</name>
+		<value>logs</value>
+		<description>user sync log path</description>
+	</property>
+</configuration>

+ 84 - 0
ambari-server/src/main/resources/common-services/RANGER/0.4.0/metainfo.xml

@@ -0,0 +1,84 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>RANGER</name>
+            <displayName>Ranger</displayName>
+            <comment>Comprehensive security for Hadoop</comment>
+            <version>0.4.0</version>
+            <components>
+                
+                <component>
+                    <name>RANGER_ADMIN</name>
+                    <displayName>Ranger Admin</displayName>
+                    <category>MASTER</category>
+                    <cardinality>1</cardinality>
+                    <commandScript>
+                        <script>scripts/ranger_admin.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>600</timeout>
+                    </commandScript>
+                </component>
+
+                <component>
+                    <name>RANGER_USERSYNC</name>
+                    <displayName>Ranger Usersync</displayName>
+                    <category>MASTER</category>
+                    <cardinality>1</cardinality>
+                    <commandScript>
+                        <script>scripts/ranger_usersync.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>600</timeout>
+                    </commandScript>           
+                </component>
+
+            </components>              
+
+            <osSpecifics>
+                <osSpecific>
+                    <osFamily>redhat5,redhat6,suse11,ubuntu12</osFamily>
+                    <packages>
+                        <package>
+                            <name>ranger-admin</name>                                
+                        </package>
+                        <package>
+                            <name>ranger-usersync</name>
+                        </package>                           
+                    </packages>                        
+                </osSpecific>
+            </osSpecifics>
+
+            <configuration-dependencies>
+                <config-type>admin-properties</config-type>
+                <config-type>usersync-properties</config-type>
+            </configuration-dependencies>
+
+            <commandScript>
+                <script>scripts/service_check.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>300</timeout>				
+            </commandScript>
+
+        </service>
+    </services>
+</metainfo>

+ 43 - 0
ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py

@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management import *
+
# Shared parameter bindings for the Ranger Admin / Usersync scripts.
config  = Script.get_config()
tmp_dir = Script.get_tmp_dir()

# Normalized stack version, e.g. '2.2.0.0-2041'; empty string when unknown.
hdp_stack_version         = format_hdp_stack_version(str(config['hostLevelParams']['stack_version']))
stack_is_hdp22_or_further = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0

# Ranger packaging only exists from HDP 2.2 onwards; on older stacks the
# install/start paths below are simply left undefined.
if stack_is_hdp22_or_further:
    ranger_home    = '/usr/hdp/current/ranger-admin'
    ranger_stop    = '/usr/bin/ranger-admin-stop'
    ranger_start   = '/usr/bin/ranger-admin-start'
    usersync_home  = '/usr/hdp/current/ranger-usersync'
    usersync_start = '/usr/bin/ranger-usersync-start'
    usersync_stop  = '/usr/bin/ranger-usersync-stop'

java_home  = config['hostLevelParams']['java_home']
unix_user  = default("/configurations/ranger-env/ranger_user", "ranger")
unix_group = default("/configurations/ranger-env/ranger_group", "ranger")

+ 58 - 0
ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py

@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from setup_ranger import setup_ranger
+
class RangerAdmin(Script):
    """Ambari lifecycle handler for the Ranger Admin server component."""

    def install(self, env):
        # Install the OS packages declared in metainfo.xml, then run the
        # Ranger setup routine (writes install.properties, runs setup.sh).
        self.install_packages(env)
        setup_ranger(env)

    def stop(self, env):
        import params
        env.set_params(params)
        Execute(format('{params.ranger_stop}'))

    def start(self, env):
        import params
        # NOTE(review): unlike stop(), start() does not call
        # env.set_params(params) -- presumably setup_ranger(env) does so
        # internally; confirm this is intentional.
        setup_ranger(env)
        Execute(format('{params.ranger_start}'))
     
    def status(self, env):
        # No pid file is used here; detect the daemon by the
        # proc_rangeradmin marker in its command line.
        cmd = 'ps -ef | grep proc_rangeradmin | grep -v grep'
        code, output = shell.call(cmd, timeout=20)

        if code != 0:
            Logger.debug('Ranger admin process not running')
            raise ComponentIsNotRunning()
        pass 

    def configure(self, env):
        import params
        env.set_params(params)
+
+
+if __name__ == "__main__":
+  RangerAdmin().execute()

+ 56 - 0
ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_usersync.py

@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from setup_ranger import setup_usersync
+
class RangerUsersync(Script):
    """Ambari lifecycle handler for the Ranger Usersync daemon."""

    def install(self, env):
        # Install the OS packages, then run the usersync setup routine.
        self.install_packages(env)
        setup_usersync(env)        

    def stop(self, env):
        import params
        # NOTE(review): no env.set_params(params) here, unlike
        # RangerAdmin.stop -- format() only needs the local 'params'
        # binding, but confirm the omission is intentional.
        Execute(format('{params.usersync_stop}'))

    def start(self, env):
        import params
        setup_usersync(env)
        Execute(format('{params.usersync_start}'))
     
    def status(self, env):
        # No pid file is used here; detect the daemon by the
        # proc_rangerusersync marker in its command line.
        cmd = 'ps -ef | grep proc_rangerusersync | grep -v grep'
        code, output = shell.call(cmd, timeout=20)        

        if code != 0:
            Logger.debug('Ranger usersync process not running')
            raise ComponentIsNotRunning()
        pass

    def configure(self, env):
        import params
        env.set_params(params)
+
+
+if __name__ == "__main__":
+  RangerUsersync().execute()

+ 53 - 0
ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/service_check.py

@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+class RangerServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    self.check_ranger_admin_service()
+    self.check_ranger_usersync_service()
+    
+  def check_ranger_admin_service(self):
+    cmd = 'ps -ef | grep proc_rangeradmin | grep -v grep'
+    code, output = shell.call(cmd, timeout=20)
+    if code == 0:
+      Logger.info('Ranger admin process up and running')
+    else:
+      Logger.debug('Ranger admin process not running')
+      raise ComponentIsNotRunning()
+  pass
+
+
+  def check_ranger_usersync_service(self):
+    cmd = 'ps -ef | grep proc_rangerusersync | grep -v grep'
+    code, output = shell.call(cmd, timeout=20)
+    if code == 0:
+      Logger.info('Ranger usersync process up and running')
+    else:
+      Logger.debug('Ranger usersync process not running')
+      raise ComponentIsNotRunning()
+  pass
+
+
+if __name__ == "__main__":
+  RangerServiceCheck().execute()

+ 153 - 0
ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger.py

@@ -0,0 +1,153 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import fileinput
+import shutil
+import os
+from resource_management import *
+from resource_management.core.logger import Logger
+
+def setup_ranger(env):
+    import params
+    env.set_params(params)
+
+    if check_db_connnection(env):
+        file_path = params.ranger_home + '/install.properties'
+
+        if os.path.isfile(file_path):
+            shutil.copyfile(file_path, params.ranger_home + '/install-bk.properties')
+        else:
+            raise Fail('Ranger admin install.properties file doesnot exist')
+
+        write_properties_to_file(file_path, params.config['configurations']['admin-properties'])
+    
+        cmd = format('cd {ranger_home} && {ranger_home}/setup.sh')
+
+        try:
+           opt = Execute(cmd, environment={'JAVA_HOME': params.java_home}, logoutput=True)
+        except Exception, e:
+            if os.path.isfile(params.ranger_home + '/install-bk.properties'):
+                os.remove(file_path)
+                os.rename(params.ranger_home + '/install-bk.properties', file_path)
+            raise Fail('Ranger installation Failed, {0}'.format(str(e)))
+
+        if os.path.isfile(params.ranger_home + '/install-bk.properties'):
+            os.remove(file_path)
+            os.rename(params.ranger_home + '/install-bk.properties', file_path)
+        else:
+            raise Fail('Ranger admin install.properties backup file doesnot exist')
+        
+def setup_usersync(env):
+    import params
+    env.set_params(params)
+
+    file_path = params.usersync_home + '/install.properties'
+    write_properties_to_file(file_path, usersync_properties(params))
+    
+    cmd = format('cd {usersync_home} && {usersync_home}/setup.sh')
+    Execute(cmd, environment={'JAVA_HOME': params.java_home}, logoutput=True)
+
+def write_properties_to_file(file_path, value):
+    for key in value:
+      modify_config(file_path, key, value[key])        
+
+def modify_config(filepath, variable, setting):
+    var_found = False
+    already_set = False
+    V=str(variable)
+    S=str(setting)
+
+    if ' ' in S:
+        S = '%s' % S
+
+    for line in fileinput.input(filepath, inplace = 1):
+        if not line.lstrip(' ').startswith('#') and '=' in line:
+            _infile_var = str(line.split('=')[0].rstrip(' '))
+            _infile_set = str(line.split('=')[1].lstrip(' ').rstrip())
+            if var_found == False and _infile_var.rstrip(' ') == V:
+                var_found = True
+                if _infile_set.lstrip(' ') == S:
+                    already_set = True
+                else:
+                    line = "%s=%s\n" % (V, S)
+
+        sys.stdout.write(line)
+
+    if not var_found:
+        with open(filepath, "a") as f:
+            f.write("%s=%s\n" % (V, S))
+    elif already_set == True:
+        pass
+    else:
+        pass
+
+    return
+
+def usersync_properties(params):
+    d = dict()
+
+    d['POLICY_MGR_URL'] = params.config['configurations']['admin-properties']['policymgr_external_url']
+    
+    d['SYNC_SOURCE'] = params.config['configurations']['usersync-properties']['SYNC_SOURCE']
+    d['MIN_UNIX_USER_ID_TO_SYNC'] = params.config['configurations']['usersync-properties']['MIN_UNIX_USER_ID_TO_SYNC']
+    d['SYNC_INTERVAL'] = params.config['configurations']['usersync-properties']['SYNC_INTERVAL']
+    d['SYNC_LDAP_URL'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_URL']
+    d['SYNC_LDAP_BIND_DN'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_BIND_DN']
+    d['SYNC_LDAP_BIND_PASSWORD'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_BIND_PASSWORD']
+    d['CRED_KEYSTORE_FILENAME'] = params.config['configurations']['usersync-properties']['CRED_KEYSTORE_FILENAME']
+    d['SYNC_LDAP_USER_SEARCH_BASE'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_SEARCH_BASE']
+    d['SYNC_LDAP_USER_SEARCH_SCOPE'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_SEARCH_SCOPE']
+    d['SYNC_LDAP_USER_OBJECT_CLASS'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_OBJECT_CLASS']
+    d['SYNC_LDAP_USER_SEARCH_FILTER'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_SEARCH_FILTER']
+    d['SYNC_LDAP_USER_NAME_ATTRIBUTE'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_NAME_ATTRIBUTE']
+    d['SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE']
+    d['SYNC_LDAP_USERNAME_CASE_CONVERSION'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USERNAME_CASE_CONVERSION']
+    d['SYNC_LDAP_GROUPNAME_CASE_CONVERSION'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_GROUPNAME_CASE_CONVERSION']
+    d['logdir'] = params.config['configurations']['usersync-properties']['logdir']
+
+    return d
+
+def check_db_connnection(env):
+    import params
+    env.set_params(params)
+    
+    db_root_password = params.config['configurations']['admin-properties']["db_root_password"]
+    db_root_user = params.config['configurations']['admin-properties']["db_root_user"]
+    db_host = params.config['configurations']['admin-properties']['db_host']
+    sql_command_invoker = params.config['configurations']['admin-properties']['SQL_COMMAND_INVOKER']
+
+    Logger.info('Checking MYSQL root password')
+
+    cmd_str = "\""+sql_command_invoker+"\""+" -u "+db_root_user+" --password="+db_root_password+" -h "+db_host+" -s -e \"select version();\""
+    status, output = get_status_output(cmd_str)
+    
+    if status == 0:
+        Logger.info('Checking MYSQL root password DONE')
+        return True 
+    else:
+        Logger.info('Ranger Admin installation Failed! Ranger requires DB client installed on Ranger Host and DB server running on DB Host')
+        sys.exit(1)
+
+def get_status_output(cmd):
+    import subprocess
+
+    ret = subprocess.call(cmd, shell=True)
+    return ret, ret

+ 2 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json

@@ -3,6 +3,8 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
+    "RANGER_SERVICE_CHECK-SERVICE_CHECK" : ["RANGER_ADMIN-START", "RANGER_USERSYNC-START"],
     "FALCON_SERVER-START": ["NAMENODE-START", "DATANODE-START", "OOZIE_SERVER-START"],
     "METRIC_COLLECTOR-START": ["NAMENODE-START", "DATANODE-START"],
     "AMS_SERVICE_CHECK-SERVICE_CHECK": ["METRIC_COLLECTOR-START", "HDFS_SERVICE_CHECK-SERVICE_CHECK"],

+ 150 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml

@@ -0,0 +1,150 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+        <property>
+                <name>ranger-hbase-plugin-enabled</name>
+                <value>No</value>
+                <description>Enable ranger hbase plugin ?</description>
+        </property>
+
+	<property>
+		<name>REPOSITORY_CONFIG_USERNAME</name>
+		<value>hbase</value>
+		<description>Used for repository creation on ranger admin</description>
+	</property>	
+
+	<property>
+		<name>REPOSITORY_CONFIG_PASSWORD</name>
+		<value>hbase</value>
+		<property-type>PASSWORD</property-type>
+		<description>Used for repository creation on ranger admin</description>
+	</property>	
+
+	<property>
+		<name>XAAUDIT.DB.IS_ENABLED</name>
+		<value>true</value>
+		<description></description>
+	</property>	
+
+	<property>
+		<name>XAAUDIT.HDFS.IS_ENABLED</name>
+		<value>false</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINATION_DIRECTORY</name>
+		<value>hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FILE</name>
+		<value>%hostname%-audit.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS</name>
+		<value>900</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>86400</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FILE</name>
+		<value>%time:yyyyMMdd-HHmm.ss%.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>600</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT</name>
+		<value>10</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-keystore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_PASSWORD</name>
+		<value>myKeyFilePassword</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-truststore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_PASSWORD</name>
+		<value>changeit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>UPDATE_XAPOLICIES_ON_GRANT_REVOKE</name>
+		<value>true</value>
+		<description></description>
+	</property>
+
+</configuration>	

+ 156 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml

@@ -0,0 +1,156 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+        <property>
+               <name>hadoop.rpc.protection</name>
+               <value>-</value>
+               <description>Used for repository creation on ranger admin</description>
+        </property>
+
+	<property>
+		<name>common.name.for.certificate</name>
+		<value>-</value>
+		<description>Used for repository creation on ranger admin</description>
+	</property>
+
+        <property>
+               <name>ranger-hdfs-plugin-enabled</name>
+               <value>No</value>
+               <description>Enable ranger hdfs plugin ?</description>
+        </property>
+
+	<property>
+		<name>REPOSITORY_CONFIG_USERNAME</name>
+		<value>hadoop</value>
+		<description>Used for repository creation on ranger admin</description>
+	</property>
+
+	<property>
+		<name>REPOSITORY_CONFIG_PASSWORD</name>
+		<value>hadoop</value>
+		<property-type>PASSWORD</property-type>
+		<description>Used for repository creation on ranger admin</description>
+	</property>	
+
+	<property>
+		<name>XAAUDIT.DB.IS_ENABLED</name>
+		<value>true</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.IS_ENABLED</name>
+		<value>false</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINATION_DIRECTORY</name>
+		<value>hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FILE</name>
+		<value>%hostname%-audit.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS</name>
+		<value>900</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>86400</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FILE</name>
+		<value>%time:yyyyMMdd-HHmm.ss%.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>600</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT</name>
+		<value>10</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-keystore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_PASSWORD</name>
+		<value>myKeyFilePassword</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-truststore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_PASSWORD</name>
+		<value>changeit</value>
+		<description></description>
+	</property>
+
+</configuration>	

+ 163 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml

@@ -0,0 +1,163 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+        <property>
+                <name>jdbc.driverClassName</name>
+                <value>org.apache.hive.jdbc.HiveDriver</value>
+                <description>Used for repository creation on ranger admin</description>
+        </property>
+
+	<property>
+	        <name>common.name.for.certificate</name>
+        	<value>-</value>
+	        <description>Used for repository creation on ranger admin</description>
+	</property>
+
+
+        <property>
+                <name>ranger-hive-plugin-enabled</name>
+                <value>No</value>
+                <description>Enable ranger hive plugin ?</description>
+        </property>
+
+	<property>
+	        <name>REPOSITORY_CONFIG_USERNAME</name>
+        	<value>hive</value>
+	        <description>Used for repository creation on ranger admin</description>
+	</property>
+
+	<property>
+        	<name>REPOSITORY_CONFIG_PASSWORD</name>
+	        <value>hive</value>
+	        <property-type>PASSWORD</property-type>
+	        <description>Used for repository creation on ranger admin</description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.DB.IS_ENABLED</name>
+		<value>true</value>
+		<description></description>
+	</property>	
+
+	<property>
+		<name>XAAUDIT.HDFS.IS_ENABLED</name>
+		<value>false</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINATION_DIRECTORY</name>
+		<value>hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FILE</name>
+		<value>%hostname%-audit.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS</name>
+		<value>900</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>86400</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FILE</name>
+		<value>%time:yyyyMMdd-HHmm.ss%.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>600</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT</name>
+		<value>10</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-keystore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_PASSWORD</name>
+		<value>myKeyFilePassword</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-truststore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_PASSWORD</name>
+		<value>changeit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>UPDATE_XAPOLICIES_ON_GRANT_REVOKE</name>
+		<value>true</value>
+		<description></description>
+	</property>	
+
+</configuration>	

+ 29 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/RANGER/metainfo.xml

@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>RANGER</name>
+            <extends>common-services/RANGER/0.4.0</extends>		
+        </service>
+    </services>
+</metainfo>

+ 109 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py

@@ -23,6 +23,7 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     parentRecommendConfDict = super(HDP22StackAdvisor, self).getServiceConfigurationRecommenderDict()
     childRecommendConfDict = {
       "HDFS": self.recommendHDFSConfigurations,
+      "HIVE": self.recommendHIVEConfigurations,
       "HBASE": self.recommendHBASEConfigurations,
       "MAPREDUCE2": self.recommendMapReduce2Configurations,
       "TEZ": self.recommendTezConfigurations,
@@ -46,11 +47,38 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     putHDFSProperty('namenode_opt_newsize', max(int(clusterData['totalAvailableRam'] / 8), 128))
     putHDFSProperty = self.putProperty(configurations, "hadoop-env")
     putHDFSProperty('namenode_opt_maxnewsize', max(int(clusterData['totalAvailableRam'] / 8), 256))
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if 'ranger-hdfs-plugin-properties' in services['configurations']:
+      rangerPluginEnabled = services['configurations']['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled']
+      if ("RANGER" in servicesList) and (rangerPluginEnabled.lower() == 'Yes'.lower()):
+        putHDFSProperty("dfs.permissions.enabled",'true')
+
+  def recommendHIVEConfigurations(self, configurations, clusterData, services, hosts):
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if 'ranger-hive-plugin-properties' in services['configurations']:
+      rangerPluginEnabled = services['configurations']['ranger-hive-plugin-properties']['properties']['ranger-hive-plugin-enabled']
+      if ("RANGER" in servicesList) :
+        if (rangerPluginEnabled.lower() == "Yes".lower()):
+          putHiveProperty = self.putProperty(configurations, "hiveserver2-site")
+          putHiveProperty("hive.security.authorization.manager", 'com.xasecure.authorization.hive.authorizer.XaSecureHiveAuthorizerFactory')
+          putHiveProperty("hive.security.authenticator.manager", 'org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator')
+        elif (rangerPluginEnabled.lower() == "No".lower()):
+          putHiveProperty = self.putProperty(configurations, "hiveserver2-site")
+          putHiveProperty("hive.security.authorization.manager", 'org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider')
+          putHiveProperty("hive.security.authenticator.manager", 'org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator')
 
   def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):
     putHbaseSiteProperty = self.putProperty(configurations, "hbase-site")
     putHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", '0.4')
 
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if 'ranger-hbase-plugin-properties' in services['configurations']:
+      rangerPluginEnabled = services['configurations']['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled']
+      if ("RANGER" in servicesList) and (rangerPluginEnabled.lower() == "Yes".lower()):
+          putHbaseSiteProperty("hbase.security.authorization", 'true')
+          putHbaseSiteProperty("hbase.coprocessor.master.classes", 'com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor')
+          putHbaseSiteProperty("hbase.coprocessor.region.classes", 'com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor')
+
   def recommendTezConfigurations(self, configurations, clusterData, services, hosts):
     putTezProperty = self.putProperty(configurations, "tez-site")
     putTezProperty("tez.am.resource.memory.mb", int(clusterData['amMemory']) * 2 if int(clusterData['amMemory']) < 3072 else int(clusterData['amMemory']))
@@ -104,6 +132,7 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     childValidators = {
       "HDFS": {"hdfs-site": self.validateHDFSConfigurations,
                "hadoop-env": self.validateHDFSConfigurationsEnv},
+      "HIVE": {"hiveserver2-site": self.validateHIVEConfigurations},
       "HBASE": {"hbase-site": self.validateHBASEConfigurations},
       "MAPREDUCE2": {"mapred-site": self.validateMapReduce2Configurations},
       "AMS": {"ams-hbase-site": self.validateAmsHbaseSiteConfigurations,
@@ -232,7 +261,7 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
                         {"config-name": 'namenode_opt_newsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_newsize')},
                         {"config-name": 'namenode_opt_maxnewsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_maxnewsize')}]
     return self.toConfigurationValidationProblems(validationItems, "hadoop-env")
-
+  
   def validateHDFSConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     # We can not access property hadoop.security.authentication from the
     # other config (core-site). That's why we are using another heuristics here
@@ -253,6 +282,16 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     VALID_TRANSFER_PROTECTION_VALUES = ['authentication', 'integrity', 'privacy']
 
     validationItems = []
+    #Adding Ranger Plugin logic here 
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hdfs-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hdfs-plugin-enabled']
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'Yes'.lower()):
+      if hdfs_site['dfs.permissions.enabled'] != 'true':
+        validationItems.append({"config-name": 'dfs.permissions.enabled',
+                                    "item": self.getWarnItem(
+                                      "dfs.permissions.enabled needs to be set to true if Ranger HDFS Plugin is enabled.")})
+
     if (not wire_encryption_enabled and   # If wire encryption is enabled at Hadoop, it disables all our checks
           core_site['hadoop.security.authentication'] == 'kerberos' and
           core_site['hadoop.security.authorization'] == 'true'):
@@ -339,6 +378,48 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
                                       data_transfer_protection_value, VALID_TRANSFER_PROTECTION_VALUES))})
     return self.toConfigurationValidationProblems(validationItems, "hdfs-site")
 
+  def validateHIVEConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    hive_server2 = properties
+    validationItems = [] 
+    #Adding Ranger Plugin logic here 
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hive-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hive-plugin-enabled']
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    ##Add stack validations only if Ranger is enabled.
+    if ("RANGER" in servicesList):
+      ##Add stack validations for  Ranger plugin enabled.
+      if (ranger_plugin_enabled.lower() == 'Yes'.lower()):
+        prop_name = 'hive.security.authorization.manager'
+        prop_val = "com.xasecure.authorization.hive.authorizer.XaSecureHiveAuthorizerFactory"
+        if hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger HIVE Plugin is enabled."\
+                                  " {0} needs to be set to {1}".format(prop_name,prop_val))})
+        prop_name = 'hive.security.authenticator.manager'
+        prop_val = "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator"
+        if hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger HIVE Plugin is enabled."\
+                                  " {0} needs to be set to {1}".format(prop_name,prop_val))})
+      ##Add stack validations for  Ranger plugin disabled.
+      elif (ranger_plugin_enabled.lower() == 'No'.lower()):
+        prop_name = 'hive.security.authorization.manager'
+        prop_val = "org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider"
+        if hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger HIVE Plugin is disabled."\
+                                  " {0} needs to be set to {1}".format(prop_name,prop_val))})
+        prop_name = 'hive.security.authenticator.manager'
+        prop_val = "org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator"
+        if hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger HIVE Plugin is disabled."\
+                                  " {0} needs to be set to {1}".format(prop_name,prop_val))})
+    return self.toConfigurationValidationProblems(validationItems, "hiveserver2-site")
 
   def validateHBASEConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     hbase_site = properties
@@ -360,6 +441,33 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
       validationItems.append({"config-name": prop_name1,
                               "item": self.getWarnItem(
                               "{0} and {1} sum should not exceed {2}".format(prop_name1, prop_name2, props_max_sum))})
+
+    #Adding Ranger Plugin logic here 
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hbase-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hbase-plugin-enabled']
+    prop_name = 'hbase.security.authorization'
+    prop_val = "true"
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'Yes'.lower()):
+      if hbase_site[prop_name] != prop_val:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                "If Ranger HBASE Plugin is enabled."\
+                                "{0} needs to be set to {1}".format(prop_name,prop_val))})
+      prop_name = "hbase.coprocessor.master.classes"
+      prop_val = "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor"
+      if hbase_site[prop_name] != prop_val:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                "If Ranger HBASE Plugin is enabled."\
+                                " {0} needs to be set to {1}".format(prop_name,prop_val))})
+      prop_name = "hbase.coprocessor.region.classes"
+      prop_val = "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor"
+      if hbase_site[prop_name] != prop_val:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                "If Ranger HBASE Plugin is enabled."\
+                                " {0} needs to be set to {1}".format(prop_name,prop_val))})
     return self.toConfigurationValidationProblems(validationItems, "hbase-site")
 
   def getMastersWithMultipleInstances(self):

+ 6 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json

@@ -327,6 +327,12 @@
             "ipc.client.connection.maxidletime": "30000", 
             "ipc.client.connect.max.retries": "50"
         }, 
+        "ranger-hdfs-plugin-properties" : {
+            "ranger-hdfs-plugin-enabled":"yes"
+        },
+        "ranger-hbase-plugin-properties" : {
+            "ranger-hbase-plugin-enabled":"yes"
+        },
         "yarn-env": {
             "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
             "apptimelineserver_heapsize": "1024", 

+ 6 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/default.json

@@ -373,6 +373,12 @@
             "hive.server2.transport.mode": "binary",
             "hive.optimize.mapjoin.mapreduce": "true"
         }, 
+        "ranger-hive-plugin-properties": {
+            "ranger-hive-plugin-enabled":"yes"
+        },
+        "ranger-knox-plugin-properties": {
+            "ranger-knox-plugin-enabled":"yes"
+        },
         "yarn-site": {
             "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 
             "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor", 

+ 3 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json

@@ -331,6 +331,9 @@
             "ipc.client.connection.maxidletime": "30000", 
             "ipc.client.connect.max.retries": "50"
         }, 
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
+        },
         "hive-site": {
             "hive.enforce.sorting": "true", 
             "javax.jdo.option.ConnectionPassword": "!`\"' 1", 

+ 3 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json

@@ -228,6 +228,9 @@
             "ipc.client.connection.maxidletime": "30000", 
             "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT"
         }, 
+        "ranger-hdfs-plugin-properties" : {
+            "ranger-hdfs-plugin-enabled":"yes"
+        },
         "hdfs-log4j": {
             "log4j.appender.DRFA.layout": "org.apache.log4j.PatternLayout", 
             "log4j.appender.DRFA.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n", 

+ 3 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json

@@ -533,6 +533,9 @@
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
         }, 
+        "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+        },        
         "ganglia-env": {
             "gmond_user": "nobody", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 

+ 3 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json

@@ -528,6 +528,9 @@
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
         }, 
+        "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+        },        
         "ganglia-env": {
             "gmond_user": "nobody", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 

+ 3 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json

@@ -82,6 +82,9 @@
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
         }, 
+        "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+        },        
         "cluster-env": {
             "security_enabled": "false", 
             "hive_tar_source": "/usr/hdp/current/hive-client/hive.tar.gz", 

+ 3 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json

@@ -533,6 +533,9 @@
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
         }, 
+        "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+        },
         "ganglia-env": {
             "gmond_user": "nobody", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 

+ 6 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/secured.json

@@ -368,6 +368,12 @@
             "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
             "ipc.client.connection.maxidletime": "30000"
         }, 
+        "ranger-hdfs-plugin-properties" : {
+            "ranger-hdfs-plugin-enabled":"yes"
+        },
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
+        },
         "hive-site": {
             "hive.enforce.sorting": "true", 
             "javax.jdo.option.ConnectionPassword": "!`\"' 1", 

+ 3 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json

@@ -366,6 +366,9 @@
             "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
             "ipc.client.connection.maxidletime": "30000"
         }, 
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
+        },
         "hive-site": {
             "hive.enforce.sorting": "true", 
             "javax.jdo.option.ConnectionPassword": "!`\"' 1", 

+ 145 - 18
ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py

@@ -159,10 +159,20 @@ class TestHDP22StackAdvisor(TestCase):
       },
       'core-site': {
         'properties': unsecure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties':{
+          'properties': {'ranger-hdfs-plugin-enabled':'Yes'}
       }
     }
+    services = {"services":
+                    [{"StackServices":
+                          {"service_name" : "HDFS",
+                           "service_version" : "2.6.0.2.2",
+                           }
+                     }]
+                }
     expected = []  # No warnings
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Unsecured cluster, unsecure ports
@@ -176,10 +186,22 @@ class TestHDP22StackAdvisor(TestCase):
         },
       'core-site': {
         'properties': unsecure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = []  # No warnings
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    services = {"services":
+                [{"StackServices":
+                      {"service_name" : "HDFS",
+                       "service_version" : "2.6.0.2.2",
+                       }
+                 }]
+            }
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, invalid dfs.http.policy value
@@ -194,6 +216,11 @@ class TestHDP22StackAdvisor(TestCase):
       },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = [{'config-name': 'dfs.http.policy',
@@ -201,7 +228,14 @@ class TestHDP22StackAdvisor(TestCase):
                  'level': 'WARN',
                  'message': "Invalid property value: WRONG_VALUE. Valid values are ['HTTP_ONLY', 'HTTPS_ONLY', 'HTTP_AND_HTTPS']",
                  'type': 'configuration'}]
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    services = {"services":
+            [{"StackServices":
+                  {"service_name" : "HDFS",
+                   "service_version" : "2.6.0.2.2",
+                   }
+             }]
+        }
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, https address not defined
@@ -215,10 +249,22 @@ class TestHDP22StackAdvisor(TestCase):
         },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = [ ]
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    services = {"services":
+            [{"StackServices":
+                  {"service_name" : "HDFS",
+                   "service_version" : "2.6.0.2.2",
+                   }
+             }]
+        }
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, https address defined and secure
@@ -233,10 +279,22 @@ class TestHDP22StackAdvisor(TestCase):
         },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = []
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    services = {"services":
+            [{"StackServices":
+                  {"service_name" : "HDFS",
+                   "service_version" : "2.6.0.2.2",
+                   }
+             }]
+        }
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, https address defined and non secure
@@ -251,10 +309,22 @@ class TestHDP22StackAdvisor(TestCase):
       },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = []
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    services = {"services":
+            [{"StackServices":
+                  {"service_name" : "HDFS",
+                   "service_version" : "2.6.0.2.2",
+                   }
+             }]
+        }
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, non secure dfs port, https property not defined
@@ -268,7 +338,13 @@ class TestHDP22StackAdvisor(TestCase):
       },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
+
     }
     expected = [{'config-name': 'dfs.datanode.address',
                  'config-type': 'hdfs-site',
@@ -298,7 +374,7 @@ class TestHDP22StackAdvisor(TestCase):
                             "order to be able to use HTTPS.",
                  'type': 'configuration'}
     ]
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
 
@@ -314,6 +390,11 @@ class TestHDP22StackAdvisor(TestCase):
         },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = [{'config-name': 'dfs.datanode.address',
@@ -343,7 +424,7 @@ class TestHDP22StackAdvisor(TestCase):
                             "able to use HTTPS.",
                  'type': 'configuration'}
     ]
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, valid non-root configuration
@@ -359,10 +440,15 @@ class TestHDP22StackAdvisor(TestCase):
       },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = []
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTP_ONLY, insecure port
@@ -377,6 +463,11 @@ class TestHDP22StackAdvisor(TestCase):
       },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = [{'config-name': 'dfs.datanode.address',
@@ -398,7 +489,7 @@ class TestHDP22StackAdvisor(TestCase):
                             "['dfs.datanode.address', 'dfs.datanode.http.address'] use secure ports.",
                  'type': 'configuration'}
                 ]
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTP_ONLY, valid configuration
@@ -413,10 +504,15 @@ class TestHDP22StackAdvisor(TestCase):
         },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = []
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, absent dfs.http.policy (typical situation)
@@ -430,10 +526,15 @@ class TestHDP22StackAdvisor(TestCase):
         },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = []
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTP_ONLY, misusage of dfs.data.transfer.protection warning
@@ -449,6 +550,11 @@ class TestHDP22StackAdvisor(TestCase):
         },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = [{'config-name': 'dfs.data.transfer.protection',
@@ -457,7 +563,7 @@ class TestHDP22StackAdvisor(TestCase):
                  'message': "dfs.data.transfer.protection property can not be used when dfs.http.policy is "
                             "set to any value other then HTTPS_ONLY. Tip: When dfs.http.policy property is not defined, it defaults to HTTP_ONLY",
                  'type': 'configuration'}]
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, wrong dfs.data.transfer.protection value
@@ -473,6 +579,11 @@ class TestHDP22StackAdvisor(TestCase):
       },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = [{'config-name': 'dfs.data.transfer.protection',
@@ -480,7 +591,7 @@ class TestHDP22StackAdvisor(TestCase):
                  'level': 'WARN',
                  'message': "Invalid property value: WRONG_VALUE. Valid values are ['authentication', 'integrity', 'privacy'].",
                  'type': 'configuration'}]
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Hadoop wire encryption enabled
@@ -496,10 +607,15 @@ class TestHDP22StackAdvisor(TestCase):
       },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = []  # No warnings
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
   def test_recommendYARNConfigurations(self):
@@ -631,11 +747,22 @@ class TestHDP22StackAdvisor(TestCase):
       'hdfs-site': {
         'properties': {
           'dfs.datanode.max.transfer.threads': '16384'
-        }
+        },
       }
     }
-
-    self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, '', '')
+    services = {"services":
+                    [{"StackServices":
+                          {"service_name" : "HDFS",
+                           "service_version" : "2.6.0.2.2",
+                           }
+                     }],
+                "configurations": {
+                    'ranger-hdfs-plugin-properties':{
+                        "properties": {"ranger-hdfs-plugin-enabled":"Yes"}
+                    }
+                }
+                }
+    self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, '')
     self.assertEquals(configurations, expected)
 
   def test_validateHDFSConfigurationsEnv(self):

+ 7 - 3
ambari-server/src/test/python/stacks/2.2/configs/default.json

@@ -150,9 +150,13 @@
         "kafka.ganglia.metrics.port": "8649",
         "log.index.interval.bytes": "4096",
         "log.retention.hours": "168"
-      }
-
-
+      },
+      "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+      },
+      "ranger-hive-plugin-properties": {
+            "ranger-hive-plugin-enabled":"yes"
+       }
     },
     "configuration_attributes": {
         "yarn-site": {

+ 3 - 0
ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json

@@ -400,6 +400,9 @@
             "pig_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/pig/",
             "sink.dbpassword": "",
             "sink_database": "Existing MSSQL Server database with sql auth"
+        },
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
         }
     },
     "configurationTags": {

+ 1 - 0
ambari-web/app/config.js

@@ -64,6 +64,7 @@ App.supports = {
   alwaysEnableManagedMySQLForHive: true,
   preKerberizeCheck: false,
   automatedKerberos: true,
+  ranger: false,
   customizeAgentUserAccount: false,
   installGanglia: false,
   opsDuringRollingUpgrade: false