
AMBARI-11223. Port Enhanced Config changes to HDPWIN stack (srimanth)

Srimanth Gunturi 10 years ago
parent
commit
cf2283b88d
20 changed files with 3003 additions and 99 deletions
  1. + 4 - 4
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/metainfo.xml
  2. + 52 - 0
      ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-site.xml
  3. + 52 - 18
      ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/stack_advisor.py
  4. + 56 - 1
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HBASE/configuration/hbase-site.xml
  5. + 6 - 0
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HBASE/metainfo.xml
  6. + 367 - 0
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HBASE/themes/theme.json
  7. + 6 - 0
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HDFS/metainfo.xml
  8. + 179 - 0
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HDFS/themes/theme.json
  9. + 139 - 0
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-env.xml
  10. + 573 - 5
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml
  11. + 100 - 0
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hiveserver2-site.xml
  12. + 6 - 0
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/metainfo.xml
  13. + 408 - 0
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/themes/theme.json
  14. + 15 - 1
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/capacity-scheduler.xml
  15. + 44 - 0
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-env.xml
  16. + 104 - 1
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-site.xml
  17. + 13 - 0
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/metainfo.xml
  18. + 132 - 0
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/themes-mapred/theme.json
  19. + 250 - 0
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/themes/theme.json
  20. + 497 - 69
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/stack_advisor.py

+ 4 - 4
ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/metainfo.xml

@@ -49,10 +49,10 @@
       </osSpecifics>
 
       <themes>
-	<theme>
-	  <fileName>theme.json</fileName>
-	  <default>true</default>
-	</theme>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
       </themes>
 
     </service>

+ 52 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-site.xml

@@ -122,6 +122,24 @@
     <name>hive.tez.container.size</name>
     <value>682</value>
     <description>By default, Tez uses the java options from map tasks. Use this property to override that value.</description>
+    <display-name>Tez Container Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>682</minimum>
+      <maximum>6820</maximum>
+      <unit>MB</unit>
+      <increment-step>682</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.minimum-allocation-mb</name>
+      </property>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.maximum-allocation-mb</name>
+      </property>
+    </depends-on>
   </property>
 
   <property>
@@ -207,12 +225,46 @@
     <name>hive.compactor.initiator.on</name>
     <value>false</value>
     <description>Whether to run the compactor's initiator thread in this metastore instance or not. If there is more than one instance of the thrift metastore this should only be set to true for one of them.</description>
+    <display-name>Run Compactor</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
   </property>
 
   <property>
     <name>hive.compactor.worker.threads</name>
     <value>0</value>
     <description>Number of compactor worker threads to run on this metastore instance. Can be different values on different metastore instances.</description>
+    <display-name>Number of threads used by Compactor</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>20</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
   </property>
 
   <property>
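
The value-attributes added above turn hive.tez.container.size into a bounded slider: minimum 682 MB, maximum 6820 MB, step 682 MB. Those bounds yield exactly ten discrete positions; the enumeration below is a sketch of how a UI could expand minimum/maximum/increment-step into concrete choices (the expansion rule is an assumption, not code from this commit):

    minimum, maximum, step = 682, 6820, 682  # MB, from the value-attributes above
    stops = list(range(minimum, maximum + 1, step))
    print(stops)       # [682, 1364, 2046, ..., 6820]
    print(len(stops))  # 10 slider positions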

+ 52 - 18
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/stack_advisor.py

@@ -131,7 +131,10 @@ class HDPWIN21StackAdvisor(DefaultStackAdvisor):
 
   def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
     putYarnProperty = self.putProperty(configurations, "yarn-site", services)
-    putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(clusterData['containers'] * clusterData['ramPerContainer'])))
+    nodemanagerMinRam = 1048576 # 1TB in mb
+    for nodemanager in self.getHostsWithComponent("YARN", "NODEMANAGER", services, hosts):
+      nodemanagerMinRam = min(nodemanager["Hosts"]["total_mem"]/1024, nodemanagerMinRam)
+    putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(min(clusterData['containers'] * clusterData['ramPerContainer'], nodemanagerMinRam))))
     putYarnProperty('yarn.scheduler.minimum-allocation-mb', int(clusterData['ramPerContainer']))
     putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"]))
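
The new recommendation caps yarn.nodemanager.resource.memory-mb at the physical RAM of the smallest NodeManager host, where total_mem is reported in KB. A minimal standalone sketch of the arithmetic with made-up host sizes (the numbers are illustrative only):

    # Illustrative re-run of the logic above with hypothetical host facts.
    nodemanager_total_mem_kb = [16777216, 8388608]   # a 16 GB and an 8 GB host
    clusterData = {"containers": 8, "ramPerContainer": 1024}  # assumed values

    nodemanagerMinRam = 1048576  # 1 TB in MB, i.e. effectively uncapped
    for total_mem_kb in nodemanager_total_mem_kb:
        nodemanagerMinRam = min(total_mem_kb / 1024, nodemanagerMinRam)  # KB -> MB

    # 8 containers * 1024 MB = 8192 MB; the smallest host also has 8192 MB,
    # so the recommendation lands at 8192 rather than exceeding the host.
    print(int(round(min(clusterData["containers"] * clusterData["ramPerContainer"],
                        nodemanagerMinRam))))  # -> 8192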
 
@@ -319,16 +322,48 @@ class HDPWIN21StackAdvisor(DefaultStackAdvisor):
     for service in services["services"]:
       serviceName = service["StackServices"]["service_name"]
       validator = self.validateServiceConfigurations(serviceName)
-      if validator is not  None:
+      if validator is not None:
         for siteName, method in validator.items():
           if siteName in recommendedDefaults:
             siteProperties = getSiteProperties(configurations, siteName)
             if siteProperties is not None:
-              resultItems = method(siteProperties, recommendedDefaults[siteName]["properties"], configurations, services, hosts)
+              siteRecommendations = recommendedDefaults[siteName]["properties"]
+              print("SiteName: %s, method: %s\n" % (siteName, method.__name__))
+              print("Site properties: %s\n" % str(siteProperties))
+              print("Recommendations: %s\n********\n" % str(siteRecommendations))
+              resultItems = method(siteProperties, siteRecommendations, configurations, services, hosts)
               items.extend(resultItems)
+    clusterWideItems = self.validateClusterConfigurations(configurations, services, hosts)
+    items.extend(clusterWideItems)
     self.validateMinMax(items, recommendedDefaults, configurations)
     return items
 
+  def validateClusterConfigurations(self, configurations, services, hosts):
+    validationItems = []
+    hostComponents = {}
+    failureMessage = ""
+
+    for service in services["services"]:
+      for component in service["components"]:
+        if component["StackServiceComponents"]["hostnames"] is not None:
+          for hostName in component["StackServiceComponents"]["hostnames"]:
+            if hostName not in hostComponents.keys():
+              hostComponents[hostName] = []
+            hostComponents[hostName].append(component["StackServiceComponents"]["component_name"])
+
+    for host in hosts["items"]:
+      # Not enough physical memory
+      requiredMemory = getMemorySizeRequired(hostComponents[host["Hosts"]["host_name"]], configurations)
+      if host["Hosts"]["total_mem"] * 1024 < requiredMemory:  # in bytes
+        failureMessage += "Not enough physical RAM on the host {0}. " \
+                          "At least {1} MB is recommended based on components assigned.\n" \
+          .format(host["Hosts"]["host_name"], requiredMemory/1048576)  # MB
+    if failureMessage:
+      notEnoughMemoryItem = self.getWarnItem(failureMessage)
+      validationItems.extend([{"config-name": "", "item": notEnoughMemoryItem}])
+
+    return self.toConfigurationValidationProblems(validationItems, "")
+
   def getServiceConfigurationValidators(self):
     return {
       "HDFS": {"hadoop-env": self.validateHDFSConfigurationsEnv},
@@ -409,6 +444,11 @@ class HDPWIN21StackAdvisor(DefaultStackAdvisor):
     return {"level": "ERROR", "message": message}
 
   def validatorLessThenDefaultValue(self, properties, recommendedDefaults, propertyName):
+    if propertyName not in recommendedDefaults:
+      # If a property name exists in say hbase-env and hbase-site (which is allowed), then it will exist in the
+      # "properties" dictionary, but not necessarily in the "recommendedDefaults" dictionary". In this case, ignore it.
+      return None
+
     if not propertyName in properties:
       return self.getErrorItem("Value should be set")
     value = to_number(properties[propertyName])
@@ -537,6 +577,15 @@ class HDPWIN21StackAdvisor(DefaultStackAdvisor):
       return componentHosts[0]
     return None
 
+  def getHostComponentsByCategories(self, hostname, categories, services, hosts):
+    components = []
+    if services is not None and hosts is not None:
+      for service in services["services"]:
+          components.extend([componentEntry for componentEntry in service["components"]
+                              if componentEntry["StackServiceComponents"]["component_category"] in categories
+                              and hostname in componentEntry["StackServiceComponents"]["hostnames"]])
+    return components
+
   def validateAmsHbaseSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
 
     amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
@@ -611,16 +660,12 @@ class HDPWIN21StackAdvisor(DefaultStackAdvisor):
     masterHostItem = None
 
     if masterItem is None:
-      hostComponents = {}
       hostMasterComponents = {}
 
       for service in services["services"]:
         for component in service["components"]:
           if component["StackServiceComponents"]["hostnames"] is not None:
             for hostName in component["StackServiceComponents"]["hostnames"]:
-              if hostName not in hostComponents.keys():
-                hostComponents[hostName] = []
-              hostComponents[hostName].append(component["StackServiceComponents"]["component_name"])
               if self.isMasterComponent(component):
                 if hostName not in hostMasterComponents.keys():
                   hostMasterComponents[hostName] = []
@@ -642,17 +687,6 @@ class HDPWIN21StackAdvisor(DefaultStackAdvisor):
               masterHostItem = self.getWarnItem(
                 masterHostMessage.format(
                   collectorHostName, str(", ".join(hostMasterComponents[collectorHostName]))))
-
-            # Not enough physical memory
-            requiredMemory = getMemorySizeRequired(hostComponents[collectorHostName], configurations)
-            if host["Hosts"]["total_mem"] * 1024 < requiredMemory:  # in bytes
-              message = "Not enough total RAM on the host {0}, " \
-                        "at least {1} MB required for the components({2})" \
-                .format(collectorHostName, requiredMemory/1048576,
-                        str(", ".join(hostComponents[collectorHostName])))  # MB
-              regionServerItem = self.getErrorItem(message)
-              masterItem = self.getErrorItem(message)
-              break
       pass
 
     # Check RS memory in distributed mode since we set default as 512m
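
The validateClusterConfigurations method added in this file mixes three units: getMemorySizeRequired returns bytes, host total_mem arrives in KB, and the warning reports MB. A minimal sketch of that unit handling, using a stand-in for getMemorySizeRequired (the real helper derives the figure from component heap settings in the configurations):

    def getMemorySizeRequired_stub(componentNames):
        # Stand-in only: assume each component needs 1 GB, returned in bytes.
        return len(componentNames) * 1024 * 1024 * 1024

    host = {"host_name": "c6401.example.com", "total_mem": 2097152}  # 2 GB in KB
    components = ["NAMENODE", "HBASE_MASTER", "METRICS_COLLECTOR"]

    requiredMemory = getMemorySizeRequired_stub(components)   # 3 GB in bytes
    if host["total_mem"] * 1024 < requiredMemory:             # KB -> bytes
        print("Not enough physical RAM on the host {0}. "
              "At least {1} MB is recommended based on components assigned."
              .format(host["host_name"], requiredMemory // 1048576))  # bytes -> MB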

+ 56 - 1
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HBASE/configuration/hbase-site.xml

@@ -70,5 +70,60 @@
       in a region server hits hbase.regionserver.global.memstore.size.lower.limit.
     </description>
   </property>
-
+  <property>
+    <name>hbase.hstore.compaction.max</name>
+    <value>10</value>
+    <description>The maximum number of StoreFiles which will be selected for a single minor
+      compaction, regardless of the number of eligible StoreFiles. Effectively, the value of
+      hbase.hstore.compaction.max controls the length of time it takes a single compaction to
+      complete. Setting it larger means that more StoreFiles are included in a compaction. For most
+      cases, the default value is appropriate.
+    </description>
+    <display-name>Maximum Files in a Store before Compaction</display-name>
+    <value-attributes>
+      <type>int</type>
+      <entries>
+        <entry>
+          <value>8</value>
+        </entry>
+        <entry>
+          <value>9</value>
+        </entry>
+        <entry>
+          <value>10</value>
+        </entry>
+        <entry>
+          <value>11</value>
+        </entry>
+        <entry>
+          <value>12</value>
+        </entry>
+        <entry>
+          <value>13</value>
+        </entry>
+        <entry>
+          <value>14</value>
+        </entry>
+        <entry>
+          <value>15</value>
+        </entry>
+      </entries>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.size</name>
+    <value>0.4</value>
+    <description>Percentage of RegionServer memory to allocate to write buffers.
+      Each column family within each region is allocated a smaller pool (the memstore) within this shared write pool.
+      If this buffer is full, updates are blocked and data is flushed from memstores until a global low watermark
+      (hbase.regionserver.global.memstore.size.lower.limit) is reached.
+    </description>
+    <display-name>% of RegionServer Allocated to Write Buffers</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0</minimum>
+      <maximum>0.8</maximum>
+      <increment-step>0.01</increment-step>
+    </value-attributes>
+  </property>
 </configuration>
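
To make the write-buffer fraction concrete: hbase.regionserver.global.memstore.size = 0.4 dedicates 40% of the RegionServer heap to memstores. The heap size in this sketch is an arbitrary example, not a value from this commit:

    regionserver_heap_mb = 8192   # hypothetical hbase_regionserver_heapsize
    memstore_fraction = 0.4       # hbase.regionserver.global.memstore.size
    print(regionserver_heap_mb * memstore_fraction)  # 3276.8 MB for write buffers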

+ 6 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HBASE/metainfo.xml

@@ -21,6 +21,12 @@
     <service>
       <name>HBASE</name>
       <version>0.98.4.2.2.0.0</version>
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
     </service>
   </services>
 </metainfo>

+ 367 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HBASE/themes/theme.json

@@ -0,0 +1,367 @@
+{
+  "name": "default",
+  "description": "Default theme for HBASE service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "3",
+              "tab-rows": "3",
+              "sections": [
+                {
+                  "name": "section-hbase-memory",
+                  "display-name": "Server",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-memory-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-hbase-memory-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-client",
+                  "display-name": "Client",
+                  "row-index": "0",
+                  "column-index": "2",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-client-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-disk",
+                  "display-name": "Disk",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "3",
+                  "section-columns": "3",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-disk-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-hbase-disk-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-hbase-disk-col3",
+                      "row-index": "0",
+                      "column-index": "2",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-timeouts",
+                  "display-name": "Timeouts",
+                  "row-index": "2",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-timeouts-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-security",
+                  "display-name": "Security",
+                  "row-index": "2",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-security-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "hbase-env/hbase_master_heapsize",
+          "subsection-name": "subsection-hbase-memory-col1"
+        },
+        {
+          "config": "hbase-env/hbase_regionserver_heapsize",
+          "subsection-name": "subsection-hbase-memory-col1"
+        },
+        {
+          "config": "hbase-site/hfile.block.cache.size",
+          "subsection-name": "subsection-hbase-memory-col1"
+        },
+        {
+          "config": "hbase-site/hbase.regionserver.global.memstore.size",
+          "subsection-name": "subsection-hbase-memory-col1"
+        },
+        {
+          "config": "hbase-site/hbase.hregion.memstore.flush.size",
+          "subsection-name": "subsection-hbase-memory-col2"
+        },
+        {
+          "config": "hbase-site/hbase.hregion.memstore.block.multiplier",
+          "subsection-name": "subsection-hbase-memory-col2"
+        },
+        {
+          "config": "hbase-site/hbase.regionserver.handler.count",
+          "subsection-name": "subsection-hbase-memory-col2"
+        },
+        {
+          "config": "hbase-site/hbase.client.retries.number",
+          "subsection-name": "subsection-hbase-client-col1"
+        },
+        {
+          "config": "hbase-site/hbase.client.keyvalue.maxsize",
+          "subsection-name": "subsection-hbase-client-col1"
+        },
+        {
+          "config": "hbase-site/hbase.hregion.max.filesize",
+          "subsection-name": "subsection-hbase-disk-col1"
+        },
+        {
+          "config": "hbase-site/hbase.hregion.majorcompaction",
+          "subsection-name": "subsection-hbase-disk-col2"
+        },
+        {
+          "config": "hbase-site/hbase.hstore.compaction.max",
+          "subsection-name": "subsection-hbase-disk-col3"
+        },
+        {
+          "config": "hbase-site/zookeeper.session.timeout",
+          "subsection-name": "subsection-hbase-timeouts-col1"
+        },
+        {
+          "config": "hbase-site/hbase.rpc.timeout",
+          "subsection-name": "subsection-hbase-timeouts-col1"
+        },
+        {
+          "config": "hbase-site/hbase.security.authentication",
+          "subsection-name": "subsection-hbase-security-col1"
+        },
+        {
+          "config": "hbase-site/hbase.security.authorization",
+          "subsection-name": "subsection-hbase-security-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "hbase-env/hbase_master_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-env/hbase_regionserver_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hfile.block.cache.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.regionserver.global.memstore.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hregion.memstore.flush.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hregion.memstore.block.multiplier",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hbase-site/hbase.regionserver.handler.count",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.client.retries.number",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.client.keyvalue.maxsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hregion.max.filesize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hregion.majorcompaction",
+        "widget": {
+          "type": "time-interval-spinner",
+          "units": [
+            {
+              "unit-name": "days,hours"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hstore.compaction.max",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hbase-site/zookeeper.session.timeout",
+        "widget": {
+          "type": "time-interval-spinner",
+          "units": [
+            {
+              "unit-name": "minutes,seconds"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.rpc.timeout",
+        "widget": {
+          "type": "time-interval-spinner",
+          "units": [
+            {
+              "unit-name": "minutes,seconds"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.security.authentication",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hbase-site/hbase.security.authorization",
+        "widget": {
+          "type": "toggle"
+        }
+      }
+    ]
+  }
+}
+
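
Each theme file follows the same shape: a layout grid of sections and subsections, a placement list that pins a config key to a subsection, and a widget list that picks the control type. A small sketch that walks a local copy of theme.json and prints the config-to-subsection-to-widget mapping (the file path is an assumption; any of the theme files added in this commit would work):

    import json

    with open("theme.json") as f:       # e.g. the HBASE theme above
        conf = json.load(f)["configuration"]

    widgets = {w["config"]: w["widget"]["type"] for w in conf["widgets"]}
    for placement in conf["placement"]["configs"]:
        key = placement["config"]
        print("%-55s %-35s %s" % (key, placement["subsection-name"],
                                  widgets.get(key, "-")))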

+ 6 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HDFS/metainfo.xml

@@ -21,6 +21,12 @@
     <service>
       <name>HDFS</name>
       <version>2.6.0.2.2.0.0</version>
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
     </service>
   </services>
 </metainfo>

+ 179 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HDFS/themes/theme.json

@@ -0,0 +1,179 @@
+{
+  "name": "default",
+  "description": "Default theme for HDFS service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "2",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-namenode",
+                  "display-name": "NameNode",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-namenode-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-datanode",
+                  "display-name": "DataNode",
+                  "row-index": "0",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-datanode-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "hdfs-site/dfs.namenode.name.dir",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hadoop-env/namenode_heapsize",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.namenode.handler.count",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.data.dir",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hadoop-env/dtnode_heapsize",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.max.transfer.threads",
+          "subsection-name": "subsection-datanode-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "hdfs-site/dfs.namenode.name.dir",
+        "widget": {
+          "type": "directories"
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.namenode.handler.count",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hadoop-env/namenode_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.data.dir",
+        "widget": {
+          "type": "directories"
+        }
+      },
+      {
+        "config": "hadoop-env/dtnode_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.max.transfer.threads",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
+

+ 139 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-env.xml

@@ -0,0 +1,139 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hive.client.heapsize</name>
+    <deleted>true</deleted>
+  </property>
+
+  <!-- hive-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hive-env.sh file</description>
+    <value></value>
+  </property>
+
+  <property>
+    <name>hive_exec_orc_storage_strategy</name>
+    <display-name>ORC Storage Strategy</display-name>
+    <value>SPEED</value>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>SPEED</value>
+          <label>Speed</label>
+        </entry>
+        <entry>
+          <value>COMPRESSION</value>
+          <label>Compression</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive_txn_acid</name>
+    <display-name>ACID Transactions</display-name>
+    <value>off</value>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>on</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>off</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>cost_based_optimizer</name>
+    <display-name>Cost Based Optimizer</display-name>
+    <value>Off</value>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>On</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>Off</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive_security_authorization</name>
+    <display-name>Choose Authorization</display-name>
+    <value>None</value>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>None</value>
+          <label>None</label>
+        </entry>
+        <entry>
+          <value>SQLStdAuth</value>
+          <label>SQLStdAuth</label>
+        </entry>
+        <entry>
+          <value>Ranger</value>
+          <label>Ranger</label>
+        </entry>
+      </entries>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive_timeline_logging_enabled</name>
+    <display-name>Use ATS Logging</display-name>
+    <value>true</value>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+</configuration>
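
hive-env.xml only declares the UI-facing switches; several hive-site properties below point back at hive_txn_acid through depends-on, and the actual value flipping happens in the stack advisor. The mapping below is a hypothetical illustration of the kind of recommendation an advisor could derive from that switch, restricted to property names visible in this commit; it is not code from this patch:

    # Hypothetical ACID-mode defaults (illustrative, not from this commit).
    ACID_DEFAULTS = {
        "on": {
            "hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager",
            "hive.compactor.initiator.on": "true",
            "hive.compactor.worker.threads": "1",
            "hive.enforce.bucketing": "true",
        },
        "off": {
            "hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager",
            "hive.compactor.initiator.on": "false",
            "hive.compactor.worker.threads": "0",
            "hive.enforce.bucketing": "false",
        },
    }

    def recommend_for_acid(hive_txn_acid):
        """Return the hive-site values paired with the chosen ACID mode."""
        return ACID_DEFAULTS[hive_txn_acid]

    print(recommend_for_acid("on")["hive.txn.manager"])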

+ 573 - 5
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml

@@ -23,6 +23,21 @@ limitations under the License.
     <name>hive.cbo.enable</name>
     <value>true</value>
     <description>Flag to control enabling Cost Based Optimizations using Calcite framework.</description>
+    <display-name>Enable Cost Based Optimizer</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
   </property>
 
   <property>
@@ -160,6 +175,14 @@ limitations under the License.
     <name>hive.exec.reducers.bytes.per.reducer</name>
     <value>67108864</value>
    <description>size per reducer. The default is 256Mb, i.e. if the input size is 1G, it will use 4 reducers.</description>
+    <display-name>Data per Reducer</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>64</minimum>
+      <maximum>4294967296</maximum>
+      <unit>B</unit>
+      <step-increment></step-increment>
+    </value-attributes>
   </property>
 
   <property>
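
The description above keeps Hive's upstream example (256 MB default), while this stack sets hive.exec.reducers.bytes.per.reducer to 67108864 bytes (64 MB). Applying the same rule of thumb to the new value (a sketch of the heuristic in the description, not Hive's actual planner):

    import math

    bytes_per_reducer = 67108864        # value set above: 64 MB
    input_size = 1 * 1024 ** 3          # 1 GB of input, as in the description
    print(math.ceil(input_size / bytes_per_reducer))  # -> 16 reducers
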
@@ -233,6 +256,27 @@ limitations under the License.
       in case the user accidentally overwrites all partitions.
       NonStrict allows all partitions of a table to be dynamic.
     </description>
+    <display-name>Allow all partitions to be Dynamic</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>nonstrict</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>strict</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
   </property>
 
   <property>
@@ -445,12 +489,35 @@ limitations under the License.
     <name>hive.exec.orc.default.stripe.size</name>
     <value>67108864</value>
     <description>Define the default ORC stripe size</description>
+    <display-name>Default ORC Stripe Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>8388608</minimum>
+      <maximum>268435456</maximum>
+      <unit>B</unit>
+      <increment-step>8388608</increment-step>
+    </value-attributes>
   </property>
 
   <property>
     <name>hive.exec.orc.default.compress</name>
     <value>ZLIB</value>
     <description>Define the default compression codec for ORC file</description>
+    <display-name>ORC Compression Algorithm</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>ZLIB</value>
+          <label>zlib Compression Library</label>
+        </entry>
+        <entry>
+          <value>SNAPPY</value>
+          <label>Snappy Compression Library</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
   </property>
 
   <property>
@@ -517,6 +584,27 @@ limitations under the License.
     <name>hive.enforce.bucketing</name>
     <value>true</value>
     <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+    <display-name>Enforce bucketing</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
   </property>
   <property>
     <name>hive.enforce.sorting</name>
@@ -600,6 +688,21 @@ limitations under the License.
       This way we can keep only one record writer open for each partition value
       in the reducer thereby reducing the memory pressure on reducers.
     </description>
+    <display-name>Sort Partitions Dynamically</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
   </property>
   <property>
     <name>hive.stats.autogather</name>
@@ -626,6 +729,27 @@ limitations under the License.
       from metastore. When this flag is disabled, Hive will make calls to filesystem to get file sizes
       and will estimate the number of rows from row schema.
     </description>
+    <display-name>Fetch partition stats at compiler</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>cost_based_optimizer</name>
+      </property>
+    </depends-on>
   </property>
   <property>
     <name>hive.stats.fetch.column.stats</name>
@@ -636,6 +760,27 @@ limitations under the License.
       can be expensive when the number of columns is high. This flag can be used to disable fetching
       of column statistics from metastore.
     </description>
+    <display-name>Fetch column stats at compiler</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>cost_based_optimizer</name>
+      </property>
+    </depends-on>
   </property>
 
   <property>
@@ -654,6 +799,27 @@ limitations under the License.
     <name>hive.txn.manager</name>
     <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
     <description/>
+    <display-name>Transaction Manager</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
+          <label>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager (off)</label>
+        </entry>
+        <entry>
+          <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
+          <label>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager (on)</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
   </property>
 
   <property>
@@ -673,6 +839,27 @@ limitations under the License.
     <description>
       Support concurrency and use locks, needed for Transactions. Requires Zookeeper.
     </description>
+    <display-name>Use Locking</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
   </property>
 
   <property>
@@ -743,6 +930,27 @@ limitations under the License.
     <name>hive.security.authorization.enabled</name>
     <value>false</value>
     <description>enable or disable the Hive client authorization</description>
+    <display-name>Enable Authorization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_security_authorization</name>
+      </property>
+    </depends-on>
   </property>
 
   <property>
@@ -755,12 +963,19 @@ limitations under the License.
   </property>
   <property>
     <name>hive.security.metastore.authorization.manager</name>
+    <display-name>Hive Authorization Manager</display-name>
     <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider,org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly</value>
     <description>
       authorization manager class name to be used in the metastore for authorization.
       The user defined authorization class should implement interface
       org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
     </description>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_security_authorization</name>
+      </property>
+    </depends-on>
   </property>
   <property>
     <name>hive.security.metastore.authorization.auth.reads</name>
@@ -865,6 +1080,33 @@ limitations under the License.
     <name>hive.server2.authentication</name>
     <description>Authentication mode, default NONE. Options are NONE, NOSASL, KERBEROS, LDAP, PAM and CUSTOM</description>
     <value>NONE</value>
+    <display-name>HiveServer2 Authentication</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>NONE</value>
+          <label>None</label>
+        </entry>
+        <entry>
+          <value>LDAP</value>
+          <label>LDAP</label>
+        </entry>
+        <entry>
+          <value>KERBEROS</value>
+          <label>Kerberos</label>
+        </entry>
+        <entry>
+          <value>PAM</value>
+          <label>PAM</label>
+        </entry>
+        <entry>
+          <value>CUSTOM</value>
+          <label>Custom</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
   </property>
 
   <property>
@@ -886,6 +1128,21 @@ limitations under the License.
       Setting this property to true will have HiveServer2 execute
       Hive operations as the user making the calls to it.
     </description>
+    <display-name>Run as end user instead of Hive user</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
   </property>
   <property>
     <name>hive.server2.table.type.mapping</name>
@@ -903,6 +1160,21 @@ limitations under the License.
     <name>hive.server2.use.SSL</name>
     <value>false</value>
     <description/>
+    <display-name>Use SSL</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
   </property>
 
   <property>
@@ -934,11 +1206,33 @@ limitations under the License.
     <name>hive.prewarm.enabled</name>
     <value>false</value>
     <description>Enables container prewarm for Tez (Hadoop 2 only)</description>
+    <display-name>Hold Containers to Reduce Latency</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
   </property>
   <property>
     <name>hive.prewarm.numcontainers</name>
-    <value>10</value>
+    <value>3</value>
     <description>Controls the number of containers to prewarm for Tez (Hadoop 2 only)</description>
+    <display-name>Number of Containers Held</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1</minimum>
+      <maximum>20</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
   </property>
 
   <property>
@@ -968,6 +1262,21 @@ limitations under the License.
       and set parallelism estimates. Tez will sample source vertices' output sizes and adjust the estimates at runtime as
       necessary.
     </description>
+    <display-name>Allow dynamic numbers of reducers</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
   </property>
   <property>
     <name>hive.tez.max.partition.factor</name>
@@ -986,6 +1295,21 @@ limitations under the License.
     <name>hive.tez.dynamic.partition.pruning</name>
     <value>true</value>
     <description>When dynamic pruning is enabled, joins on partition keys will be processed by sending events from the processing vertices to the tez application master. These events will be used to prune unnecessary partitions.</description>
+    <display-name>Allow dynamic partition pruning</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
   </property>
   <property>
     <name>hive.tez.dynamic.partition.pruning.max.event.size</name>
@@ -1051,8 +1375,44 @@ limitations under the License.
       This flag should be set to true to enable vectorized mode of query execution.
       The default value is false.
     </description>
+    <display-name>Enable Vectorization and Map Vectorization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
+    <value>52428800</value>
+    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
+      is on, and the sum of the sizes for n-1 of the tables/partitions of an n-way join is smaller than this size, the join is directly
+      converted to a mapjoin (there is no conditional task). The default is 10MB.
+    </description>
+    <display-name>For Map Join, per Map memory threshold</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>8192</minimum>
+      <maximum>17179869184</maximum>
+      <unit>B</unit>
+      <step-increment></step-increment>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.tez.container.size</name>
+      </property>
+    </depends-on>
   </property>
-
   <property>
     <name>hive.vectorized.execution.reduce.enabled</name>
     <value>false</value>
@@ -1066,15 +1426,45 @@ limitations under the License.
     <name>hive.optimize.index.filter</name>
     <value>true</value>
     <description>Whether to enable automatic use of indexes</description>
+    <display-name>Push Filters to Storage</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
   </property>
 
   <property>
     <name>hive.execution.engine</name>
-    <value>mr</value>
+    <value>tez</value>
     <description>
       Expects one of [mr, tez].
       Chooses execution engine. Options are: mr (Map reduce, default) or tez (hadoop 2 only)
     </description>
+    <display-name>Execution Engine</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>mr</value>
+          <label>MapReduce</label>
+        </entry>
+        <entry>
+          <value>tez</value>
+          <label>TEZ</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
   </property>
 
   <property>
@@ -1097,6 +1487,21 @@ limitations under the License.
       stored in metastore. For basic stats collection turn on the config hive.stats.autogather to true.
       For more advanced stats collection need to run analyze table queries.
     </description>
+    <display-name>Compute simple queries using stats only</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
   </property>
 
   <property>
@@ -1113,6 +1518,13 @@ limitations under the License.
       launched on each of the queues specified by "hive.server2.tez.default.queues".
       Determines the parallelism on each queue.
     </description>
+    <display-name>Session per queue</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1</minimum>
+      <maximum>10</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
   </property>
 
   <property>
@@ -1123,7 +1535,163 @@ limitations under the License.
       turning on Tez for HiveServer2. The user could potentially want to run queries
       over Tez without the pool of sessions.
     </description>
+    <display-name>Start Tez session at Initialization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive.exec.orc.encoding.strategy</name>
+    <value>SPEED</value>
+    <description>
+      Define the encoding strategy to use while writing data. Changing this 
+      will only affect the light weight encoding for integers. This flag will not change 
+      the compression level of higher level compression codec (like ZLIB). Possible 
+      options are SPEED and COMPRESSION.
+    </description>
+    <display-name>ORC Encoding Strategy</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>SPEED</value>
+          <label>Speed</label>
+        </entry>
+        <entry>
+          <value>COMPRESSION</value>
+          <label>Compression</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_exec_orc_storage_strategy</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hive.exec.orc.compression.strategy</name>
+    <value>SPEED</value>
+    <description>
+      Define the compression strategy to use while writing data. This changes the 
+      compression level of higher level compression codec (like ZLIB).
+    </description>
+    <display-name>ORC Compression Strategy</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>SPEED</value>
+          <label>Speed</label>
+        </entry>
+        <entry>
+          <value>COMPRESSION</value>
+          <label>Compression</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_exec_orc_storage_strategy</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hive.vectorized.execution.reduce.enabled</name>
+    <value>false</value>
+    <description>
+      This flag should be set to true to enable vectorized mode of the reduce-side of 
+      query execution.
+    </description>
+    <display-name>Enable Reduce Vectorization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive.server2.authentication.ldap.url</name>
+    <value> </value>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hive.server2.authentication.ldap.baseDN</name>
+    <value></value>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hive.server2.authentication.kerberos.keytab</name>
+    <value></value>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hive.server2.authentication.kerberos.principal</name>
+    <value></value>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hive.server2.authentication.pam.services</name>
+    <value></value>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hive.server2.custom.authentication.class</name>
+    <value></value>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
   </property>
-
-
 </configuration>

+ 100 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hiveserver2-site.xml

@@ -32,4 +32,104 @@ limitations under the License.
     The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
   </property>
   
+  <property>
+    <name>tez.session.am.dag.submit.timeout.secs</name>
+    <value>600</value>
+    <description>Time (in seconds) for which the Tez session Application Master waits for a DAG to be submitted before shutting down.</description>
+    <display-name>Max idle Tez session length</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>600</minimum>
+      <maximum>86400</maximum>
+      <unit>seconds</unit>
+      <increment-step>600</increment-step>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.doAs</name>
+    <value>true</value>
+    <description>
+      Setting this property to true will have HiveServer2 execute
+      Hive operations as the user making the calls to it.
+    </description>
+    <display-name>Run as end user instead of Hive user</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.tez.sessions.per.default.queue</name>
+    <value>1</value>
+    <description>
+      A positive integer that determines the number of Tez sessions that should be
+      launched on each of the queues specified by "hive.server2.tez.default.queues".
+      Determines the parallelism on each queue.
+    </description>
+    <display-name>Sessions per queue</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1</minimum>
+      <maximum>10</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.tez.default.queues</name>
+    <display-name>Default query queues</display-name>
+    <value>default</value>
+    <description>
+      A list of comma separated values corresponding to YARN queues of the same name.
+      When HiveServer2 is launched in Tez mode, this configuration needs to be set
+      for multiple Tez sessions to run in parallel on the cluster.
+    </description>
+    <value-attributes>
+      <type>combo</type>
+      <entries>
+        <entry>
+          <value>default</value>
+          <label>Default</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1+</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.tez.initialize.default.sessions</name>
+    <value>false</value>
+    <description>
+      This flag is used in HiveServer2 to enable a user to use HiveServer2 without
+      turning on Tez for HiveServer2. The user could potentially want to run queries
+      over Tez without the pool of sessions.
+    </description>
+    <display-name>Start Tez session at Initialization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
 </configuration>
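
Taken together, the three Tez-pool properties above determine how many Tez Application Masters HiveServer2 keeps warm: when "Start Tez session at Initialization" is true, one session is launched per queue listed in "hive.server2.tez.default.queues", multiplied by "hive.server2.tez.sessions.per.default.queue". A minimal sketch of that arithmetic (plain Python; the sample values are hypothetical, not from this commit):

def tez_session_pool_size(hiveserver2_site):
    """Pooled Tez sessions implied by the hiveserver2-site properties above."""
    # Sessions are only pre-launched when default-session initialization is on.
    if hiveserver2_site["hive.server2.tez.initialize.default.sessions"] != "true":
        return 0
    queues = [q.strip() for q in
              hiveserver2_site["hive.server2.tez.default.queues"].split(",") if q.strip()]
    per_queue = int(hiveserver2_site["hive.server2.tez.sessions.per.default.queue"])
    return len(queues) * per_queue

# Hypothetical example: two queues with two sessions each -> 4 pooled Tez AMs.
print(tez_session_pool_size({
    "hive.server2.tez.initialize.default.sessions": "true",
    "hive.server2.tez.default.queues": "default,etl",
    "hive.server2.tez.sessions.per.default.queue": "2",
}))  # 4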

+ 6 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/metainfo.xml

@@ -21,6 +21,12 @@
     <service>
       <name>HIVE</name>
       <version>0.14.0.2.2.0.0</version>
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
     </service>
   </services>
 </metainfo>

+ 408 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/themes/theme.json

@@ -0,0 +1,408 @@
+{
+  "name": "default",
+  "description": "Default theme for HIVE service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-rows": 6,
+              "tab-columns": 3,
+              "sections": [
+                {
+                  "name": "acid-transactions",
+                  "display-name": "ACID Transactions",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "acid-transactions-row1-col1-1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "interactive-query",
+                  "display-name": "Interactive Query",
+                  "row-index": "0",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "interactive-query-row1-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "security",
+                  "display-name": "Security",
+                  "row-index": "0",
+                  "column-index": "2",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "security-row1-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "optimization",
+                  "display-name": "Optimization",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "3",
+                  "section-columns": "3",
+                  "section-rows": "2",
+                  "subsections": [
+                    {
+                      "name": "optimization-row1-col1",
+                      "display-name": "Tez",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "optimization-row1-col2",
+                      "display-name": "",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "optimization-row1-col3",
+                      "display-name": "CBO",
+                      "row-index": "0",
+                      "column-index": "2",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "optimization-row2-col1",
+                      "display-name": "Storage",
+                      "row-index": "1",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "optimization-row2-col2",
+                      "display-name": "",
+                      "row-index": "1",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "optimization-row2-col3",
+                      "display-name": "Memory",
+                      "row-index": "1",
+                      "column-index": "2",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "hive-site/hive.exec.orc.default.stripe.size",
+          "subsection-name": "optimization-row2-col1"
+        },
+        {
+          "config": "hive-site/hive.exec.orc.default.compress",
+          "subsection-name": "optimization-row2-col1"
+        },
+        {
+          "config": "hive-env/hive_exec_orc_storage_strategy",
+          "subsection-name": "optimization-row2-col2"
+        },
+        {
+          "config": "hive-site/hive.auto.convert.join.noconditionaltask.size",
+          "subsection-name": "optimization-row2-col3"
+        },
+        {
+          "config": "hive-site/hive.exec.reducers.bytes.per.reducer",
+          "subsection-name": "optimization-row2-col3"
+        },
+        {
+          "config": "hive-env/hive_txn_acid",
+          "subsection-name": "acid-transactions-row1-col1-1"
+        },
+        {
+          "config": "hive-site/hive.compactor.initiator.on",
+          "subsection-name": "acid-transactions-row1-col1-1"
+        },
+        {
+          "config": "hive-site/hive.compactor.worker.threads",
+          "subsection-name": "acid-transactions-row1-col1-1"
+        },
+        {
+          "config": "hive-site/hive.execution.engine",
+          "subsection-name": "optimization-row1-col1"
+        },
+        {
+          "config": "hive-site/hive.tez.container.size",
+          "subsection-name": "optimization-row1-col1"
+        },
+        {
+          "config": "hive-site/hive.prewarm.enabled",
+          "subsection-name": "optimization-row1-col2"
+        },
+        {
+          "config": "hive-site/hive.prewarm.numcontainers",
+          "subsection-name": "optimization-row1-col2"
+        },
+        {
+          "config": "hive-site/hive.cbo.enable",
+          "subsection-name": "optimization-row1-col3"
+        },
+        {
+          "config": "hive-site/hive.stats.fetch.column.stats",
+          "subsection-name": "optimization-row1-col3"
+        },
+        {
+          "config": "hiveserver2-site/hive.server2.tez.default.queues",
+          "subsection-name": "interactive-query-row1-col1"
+        },
+        {
+          "config": "hiveserver2-site/hive.server2.tez.initialize.default.sessions",
+          "subsection-name": "interactive-query-row1-col1"
+        },
+        {
+          "config": "hiveserver2-site/hive.server2.tez.sessions.per.default.queue",
+          "subsection-name": "interactive-query-row1-col1"
+        },
+        {
+          "config": "hiveserver2-site/tez.session.am.dag.submit.timeout.secs",
+          "subsection-name": "interactive-query-row1-col1"
+        },
+        {
+          "config": "hive-env/hive_security_authorization",
+          "subsection-name": "security-row1-col1"
+        },
+        {
+          "config": "hiveserver2-site/hive.server2.enable.doAs",
+          "subsection-name": "security-row1-col1"
+        },
+        {
+          "config": "hive-site/hive.server2.authentication",
+          "subsection-name": "security-row1-col1"
+        },
+        {
+          "config": "hive-site/hive.server2.use.SSL",
+          "subsection-name": "security-row1-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "hive-site/hive.exec.orc.default.stripe.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-site/hive.exec.orc.default.compress",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hive-env/hive_exec_orc_storage_strategy",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-site/hive.auto.convert.join.noconditionaltask.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-site/hive.exec.reducers.bytes.per.reducer",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-env/hive_txn_acid",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-site/hive.compactor.initiator.on",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-site/hive.compactor.worker.threads",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-site/hive.execution.engine",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hive-site/hive.tez.container.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-site/hive.prewarm.enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-site/hive.prewarm.numcontainers",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-site/hive.cbo.enable",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-site/hive.stats.fetch.column.stats",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hiveserver2-site/hive.server2.tez.initialize.default.sessions",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hiveserver2-site/hive.server2.tez.sessions.per.default.queue",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hiveserver2-site/hive.server2.enable.doAs",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hiveserver2-site/tez.session.am.dag.submit.timeout.secs",
+        "widget": {
+          "type": "time-interval-spinner",
+          "units": [
+            {
+              "unit-name": "hours,minutes"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-env/hive_security_authorization",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hive-site/hive.server2.authentication",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hive-site/hive.server2.use.SSL",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hiveserver2-site/hive.server2.tez.default.queues",
+        "widget": {
+          "type": "list"
+        }
+      }
+    ]
+  }
+}
+
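
The theme file wires every configuration key twice: a "placement" entry assigns it to a layout subsection, and a "widgets" entry picks its control. A small consistency check, sketched below in plain Python (not part of Ambari; the file name is assumed), verifies that each placed config names an existing subsection and has a matching widget:

import json

def check_theme(theme):
    """Cross-check a theme's placement entries against its layout and widgets."""
    cfg = theme["configuration"]
    subsections = set(ss["name"]
                      for layout in cfg["layouts"]
                      for tab in layout["tabs"]
                      for section in tab["layout"]["sections"]
                      for ss in section["subsections"])
    widget_configs = set(w["config"] for w in cfg["widgets"])
    for entry in cfg["placement"]["configs"]:
        assert entry["subsection-name"] in subsections, entry["subsection-name"]
        assert entry["config"] in widget_configs, entry["config"]

# 'theme.json' stands for any of the theme files added in this commit.
with open("theme.json") as f:
    check_theme(json.load(f))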

+ 15 - 1
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/capacity-scheduler.xml

@@ -19,7 +19,21 @@
   <property>
     <name>yarn.scheduler.capacity.resource-calculator</name>
     <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
-    <description></description>
+    <display-name>CPU Scheduling</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>org.apache.hadoop.yarn.util.resource.DominantResourceCalculator</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.accessible-node-labels</name>

+ 44 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-env.xml

@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>yarn_cgroups_enabled</name>
+    <value>false</value>
+    <description>You can use CGroups to isolate CPU-heavy processes in a Hadoop cluster.</description>
+    <display-name>CPU Isolation</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+</configuration>

+ 104 - 1
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-site.xml

@@ -77,6 +77,10 @@
     <description>
       Enable RM work preserving recovery. This configuration is private to YARN for experimenting the feature.
     </description>
+    <display-name>Enable Work Preserving Restart</display-name>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
   </property>
   <property>
     <name>yarn.resourcemanager.store.class</name>
@@ -183,11 +187,30 @@
     <name>yarn.nodemanager.resource.cpu-vcores</name>
     <value>8</value>
     <description></description>
+    <display-name>Number of virtual cores</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>32</maximum>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
+      </property>
+    </depends-on>
   </property>
   <property>
     <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
-    <value>100</value>
+    <value>80</value>
     <description>The amount of CPU allocated for YARN containers - only effective when used with CGroups</description>
+    <display-name>Percentage of physical CPU allocated for all containers on a node</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>100</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
   </property>
   <property>
     <name>yarn.node-labels.manager-class</name>
@@ -323,4 +346,84 @@
     <value>/system/yarn/node-labels</value>
     <description></description>
   </property>
+  <property>
+    <name>yarn.scheduler.minimum-allocation-vcores</name>
+    <value>1</value>
+    <description>The minimum allocation for every container request at the RM, in terms of virtual CPU cores.</description>
+    <display-name>Minimum Container Size (VCores)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>8</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.cpu-vcores</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>yarn.scheduler.maximum-allocation-vcores</name>
+    <value>8</value>
+    <description>The maximum allocation for every container request at the RM, in terms of virtual CPU cores.</description>
+    <display-name>Maximum Container Size (VCores)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>8</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.cpu-vcores</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>yarn.node-labels.enabled</name>
+    <value>false</value>
+    <description>
+      Enable node labels to restrict YARN applications so that they run only on cluster nodes that have a specified node label.
+    </description>
+    <display-name>Node Labels</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.scheduler.monitor.enable</name>
+    <value>false</value>
+    <display-name>Pre-emption</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
 </configuration>
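
The <value-attributes> blocks added above are what drive the Settings sliders: minimum, maximum, and increment-step bound whatever the stack advisor recommends. A hedged sketch of that clamping (plain Python; the helper name and sample values are illustrative, not Ambari code):

def clamp_to_value_attributes(value, attrs):
    """Clamp an integer recommendation into a <value-attributes> range."""
    lo = int(attrs.get("minimum", value))
    hi = int(attrs.get("maximum", value))
    step = int(attrs.get("increment-step", 1))
    v = max(lo, min(hi, int(value)))
    return lo + ((v - lo) // step) * step  # snap down onto the step grid

# Hypothetical: the attributes of yarn.nodemanager.resource.percentage-physical-cpu-limit.
print(clamp_to_value_attributes(137, {"minimum": 0, "maximum": 100, "increment-step": 1}))  # 100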

+ 13 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/metainfo.xml

@@ -22,11 +22,24 @@
     <service>
       <name>YARN</name>
       <version>2.6.0.2.2.0.0</version>
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
      </service>
     <service>
       <name>MAPREDUCE2</name>
       <version>2.6.0.2.2.0.0</version>
       <configuration-dir>configuration-mapred</configuration-dir>
+      <themes-dir>themes-mapred</themes-dir>
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
     </service>
   </services>
 </metainfo>

+ 132 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/themes-mapred/theme.json

@@ -0,0 +1,132 @@
+{
+  "name": "default",
+  "description": "Default theme for MAPREDUCE service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-mr-scheduler",
+                  "display-name": "MapReduce",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "3",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-mr-scheduler-row1-col1",
+                      "display-name": "MapReduce Framework",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-mr-scheduler-row1-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-mr-scheduler-row1-col3",
+                      "row-index": "0",
+                      "column-index": "2",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-mr-scheduler-row2-col1",
+                      "display-name": "MapReduce AppMaster",
+                      "row-index": "1",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "3"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "mapred-site/mapreduce.map.memory.mb",
+          "subsection-name": "subsection-mr-scheduler-row1-col1"
+        },
+        {
+          "config": "mapred-site/mapreduce.reduce.memory.mb",
+          "subsection-name": "subsection-mr-scheduler-row1-col2"
+        },
+        {
+          "config": "mapred-site/yarn.app.mapreduce.am.resource.mb",
+          "subsection-name": "subsection-mr-scheduler-row2-col1"
+        },
+        {
+          "config": "mapred-site/mapreduce.task.io.sort.mb",
+          "subsection-name": "subsection-mr-scheduler-row1-col3"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "mapred-site/mapreduce.map.memory.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "mapred-site/mapreduce.reduce.memory.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "mapred-site/yarn.app.mapreduce.am.resource.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "mapred-site/mapreduce.task.io.sort.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
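
Note that the memory configs above are stored in MB (mapreduce.map.memory.mb and friends) while their sliders declare "unit-name": "GB", so the UI must convert on the way in and out. A minimal sketch of that conversion, assuming simple rounding (the exact widget behavior is not shown in this commit):

def mb_to_display_gb(mb):
    """Stored MB value -> the figure a slider with unit-name 'GB' would show."""
    return round(int(mb) / 1024.0, 2)

def display_gb_to_mb(gb):
    """Slider position in GB -> the MB value written back to mapred-site."""
    return int(float(gb) * 1024)

print(mb_to_display_gb(1536))  # 1.5, e.g. mapreduce.map.memory.mb=1536 renders as 1.5 GB
print(display_gb_to_mb(1.5))   # 1536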

+ 250 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/themes/theme.json

@@ -0,0 +1,250 @@
+{
+  "name": "default",
+  "description": "Default theme for YARN service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "3",
+              "tab-rows": "2",
+              "sections": [
+                {
+                  "name": "section-nm-sizing",
+                  "display-name": "Memory",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-nm-sizing-col1",
+                      "display-name": "Node",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-nm-sizing-col2",
+                      "display-name": "Container",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-yarn-platform-features",
+                  "display-name": "YARN Features",
+                  "row-index": "0",
+                  "column-index": "2",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-yarn-platform-features-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-container-sizing",
+                  "display-name": "CPU",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-container-sizing-col1",
+                      "display-name": "Node",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-container-sizing-col2",
+                      "display-name": "Container",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "yarn-site/yarn.nodemanager.resource.memory-mb",
+          "subsection-name": "subsection-nm-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.minimum-allocation-mb",
+          "subsection-name": "subsection-nm-sizing-col2"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.maximum-allocation-mb",
+          "subsection-name": "subsection-nm-sizing-col2"
+        },
+        {
+          "config": "yarn-site/yarn.node-labels.enabled",
+          "subsection-name": "subsection-yarn-platform-features-col1"
+        },
+        {
+          "config": "yarn-site/yarn.resourcemanager.scheduler.monitor.enable",
+          "subsection-name": "subsection-yarn-platform-features-col1"
+        },
+        {
+          "config": "capacity-scheduler/yarn.scheduler.capacity.resource-calculator",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-env/yarn_cgroups_enabled",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.nodemanager.resource.percentage-physical-cpu-limit",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.nodemanager.resource.cpu-vcores",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.minimum-allocation-vcores",
+          "subsection-name": "subsection-container-sizing-col2"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.maximum-allocation-vcores",
+          "subsection-name": "subsection-container-sizing-col2"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "yarn-site/yarn.nodemanager.resource.memory-mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.nodemanager.resource.percentage-physical-cpu-limit",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.nodemanager.resource.cpu-vcores",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.minimum-allocation-mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.maximum-allocation-mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.minimum-allocation-vcores",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.maximum-allocation-vcores",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.node-labels.enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "yarn-env/yarn_cgroups_enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "yarn-site/yarn.resourcemanager.scheduler.monitor.enable",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "capacity-scheduler/yarn.scheduler.capacity.resource-calculator",
+        "widget": {
+          "type": "toggle"
+        }
+      }
+    ]
+  }
+}

+ 497 - 69
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/stack_advisor.py

@@ -18,6 +18,8 @@ limitations under the License.
 """
 
 import math
+from math import floor
+from urlparse import urlparse
 
 class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
 
@@ -39,7 +41,10 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
     putHdfsSiteProperty = self.putProperty(configurations, "hdfs-site", services)
     putHdfsSiteProperty("dfs.datanode.max.transfer.threads", 16384 if clusterData["hBaseInstalled"] else 4096)
     dataDirsCount = 1
-    if "dfs.datanode.data.dir" in configurations["hdfs-site"]["properties"]:
+    # Prefer the user's 'dfs.datanode.data.dir' value when one is provided
+    if "hdfs-site" in services["configurations"] and "dfs.datanode.data.dir" in services["configurations"]["hdfs-site"]["properties"]:
+      dataDirsCount = len(str(services["configurations"]["hdfs-site"]["properties"]["dfs.datanode.data.dir"]).split(","))
+    elif "dfs.datanode.data.dir" in configurations["hdfs-site"]["properties"]:
       dataDirsCount = len(str(configurations["hdfs-site"]["properties"]["dfs.datanode.data.dir"]).split(","))
     if dataDirsCount <= 2:
       failedVolumesTolerated = 0
@@ -57,31 +62,38 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
       nameNodeCores = int(namenodeHosts[0]['Hosts']['cpu_count'])
     putHdfsSiteProperty("dfs.namenode.handler.count", 25*nameNodeCores)
 
-    putHdfsSiteProperty("dfs.namenode.safemode.threshold-pct", "0.99f" if len(namenodeHosts) > 1 else "1.0f")
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if ('ranger-hdfs-plugin-properties' in services['configurations']) and ('ranger-hdfs-plugin-enabled' in services['configurations']['ranger-hdfs-plugin-properties']['properties']):
+      rangerPluginEnabled = services['configurations']['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled']
+      if ("RANGER" in servicesList) and (rangerPluginEnabled.lower() == 'Yes'.lower()):
+        putHdfsSiteProperty("dfs.permissions.enabled",'true')
+
+    putHdfsSiteProperty("dfs.namenode.safemode.threshold-pct", "0.999" if len(namenodeHosts) > 1 else "1.000")
 
     putHdfsEnvProperty = self.putProperty(configurations, "hadoop-env", services)
     putHdfsEnvPropertyAttribute = self.putPropertyAttribute(configurations, "hadoop-env")
 
     putHdfsEnvProperty('namenode_heapsize', max(int(clusterData['totalAvailableRam'] / 2), 1024))
-    putHdfsEnvProperty('namenode_opt_newsize', max(int(clusterData['totalAvailableRam'] / 8), 128))
-    putHdfsEnvProperty('namenode_opt_maxnewsize', max(int(clusterData['totalAvailableRam'] / 8), 256))
 
-    nn_max_heapsize=None
+    nn_heapsize_limit = None
     if (namenodeHosts is not None and len(namenodeHosts) > 0):
       if len(namenodeHosts) > 1:
         nn_max_heapsize = min(int(namenodeHosts[0]["Hosts"]["total_mem"]), int(namenodeHosts[1]["Hosts"]["total_mem"])) / 1024
+        masters_at_host = max(self.getHostComponentsByCategories(namenodeHosts[0]["Hosts"]["host_name"], ["MASTER"], services, hosts),
+                              self.getHostComponentsByCategories(namenodeHosts[1]["Hosts"]["host_name"], ["MASTER"], services, hosts),
+                              key=len)  # pick the host running more master components
       else:
         nn_max_heapsize = int(namenodeHosts[0]["Hosts"]["total_mem"] / 1024) # total_mem in kb
+        masters_at_host = self.getHostComponentsByCategories(namenodeHosts[0]["Hosts"]["host_name"], ["MASTER"], services, hosts)
 
       putHdfsEnvPropertyAttribute('namenode_heapsize', 'maximum', max(nn_max_heapsize, 1024))
 
-      nn_heapsize = nn_max_heapsize
-      nn_heapsize -= clusterData["reservedRam"]
-      if clusterData["hBaseInstalled"]:
-        nn_heapsize -= clusterData["hbaseRam"]
-      putHdfsEnvProperty('namenode_heapsize', max(int(nn_heapsize / 2), 1024))
-      putHdfsEnvProperty('namenode_opt_newsize', max(int(nn_heapsize / 8), 128))
-      putHdfsEnvProperty('namenode_opt_maxnewsize', max(int(nn_heapsize / 8), 256))
+      nn_heapsize_limit = nn_max_heapsize
+      nn_heapsize_limit -= clusterData["reservedRam"]
+      if len(masters_at_host) > 1:
+        nn_heapsize_limit = int(nn_heapsize_limit/2)
+
+      putHdfsEnvProperty('namenode_heapsize', max(nn_heapsize_limit, 1024))
+
 
     datanodeHosts = self.getHostsWithComponent("HDFS", "DATANODE", services, hosts)
     if datanodeHosts is not None and len(datanodeHosts) > 0:
@@ -123,13 +135,15 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
       nn_memory_config = nn_memory_configs[index]
 
       #override with new values if applicable
-      if nn_max_heapsize is not None and nn_memory_config['nn_heap'] <= nn_max_heapsize:
+      if nn_heapsize_limit is not None and nn_memory_config['nn_heap'] <= nn_heapsize_limit:
         putHdfsEnvProperty('namenode_heapsize', nn_memory_config['nn_heap'])
-        putHdfsEnvProperty('namenode_opt_newsize', nn_memory_config['nn_opt'])
-        putHdfsEnvProperty('namenode_opt_maxnewsize', nn_memory_config['nn_opt'])
 
       putHdfsEnvPropertyAttribute('dtnode_heapsize', 'maximum', int(min_datanode_ram_kb/1024))
 
+    nn_heapsize = int(configurations["hadoop-env"]["properties"]["namenode_heapsize"])
+    putHdfsEnvProperty('namenode_opt_newsize', max(int(nn_heapsize / 8), 128))
+    putHdfsEnvProperty('namenode_opt_maxnewsize', max(int(nn_heapsize / 8), 128))
+
     putHdfsSitePropertyAttribute = self.putPropertyAttribute(configurations, "hdfs-site")
     putHdfsSitePropertyAttribute('dfs.datanode.failed.volumes.tolerated', 'maximum', dataDirsCount)
 
@@ -143,9 +157,14 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
     putYarnPropertyAttribute = self.putPropertyAttribute(configurations, "yarn-site")
     nodeManagerHost = self.getHostWithComponent("YARN", "NODEMANAGER", services, hosts)
     if (nodeManagerHost is not None):
-      putYarnProperty('yarn.nodemanager.resource.cpu-vcores', nodeManagerHost["Hosts"]["cpu_count"] * 2)
+      cpuPercentageLimit = 0.8
+      if "yarn.nodemanager.resource.percentage-physical-cpu-limit" in configurations["yarn-site"]["properties"]:
+        # The property is a 0-100 percentage; convert it to a fraction to match the 0.8 default above
+        cpuPercentageLimit = float(configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.percentage-physical-cpu-limit"]) / 100.0
+      cpuLimit = max(1, int(floor(nodeManagerHost["Hosts"]["cpu_count"] * cpuPercentageLimit)))
+      putYarnProperty('yarn.nodemanager.resource.cpu-vcores', str(cpuLimit))
+      putYarnProperty('yarn.scheduler.maximum-allocation-vcores', configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.cpu-vcores"])
       putYarnPropertyAttribute('yarn.nodemanager.resource.memory-mb', 'maximum', int(nodeManagerHost["Hosts"]["total_mem"] / 1024)) # total_mem in kb
-      putYarnPropertyAttribute('yarn.nodemanager.resource.cpu-vcores', 'maximum', nodeManagerHost["Hosts"]["cpu_count"] * 4)
+      putYarnPropertyAttribute('yarn.nodemanager.resource.cpu-vcores', 'maximum', nodeManagerHost["Hosts"]["cpu_count"] * 2)
       putYarnPropertyAttribute('yarn.scheduler.minimum-allocation-vcores', 'maximum', configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.cpu-vcores"])
       putYarnPropertyAttribute('yarn.scheduler.maximum-allocation-vcores', 'maximum', configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.cpu-vcores"])
       putYarnPropertyAttribute('yarn.scheduler.minimum-allocation-mb', 'maximum', configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"])
@@ -194,6 +213,7 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
     putHiveServerProperty = self.putProperty(configurations, "hiveserver2-site", services)
     putHiveEnvProperty = self.putProperty(configurations, "hive-env", services)
     putHiveSiteProperty = self.putProperty(configurations, "hive-site", services)
+    putHiveSitePropertyAttribute = self.putPropertyAttribute(configurations, "hive-site")
 
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
 
@@ -213,10 +233,6 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
     putHiveSiteProperty("hive.vectorized.execution.enabled", "true")
     putHiveSiteProperty("hive.vectorized.execution.reduce.enabled", "false")
 
-    # Memory
-    putHiveSiteProperty("hive.auto.convert.join.noconditionaltask.size", "2147483648")
-    putHiveSiteProperty("hive.exec.reducers.bytes.per.reducer", "67108864")
-
     # Transactions
     putHiveEnvProperty("hive_txn_acid", "off")
     if str(configurations["hive-env"]["properties"]["hive_txn_acid"]).lower() == "on":
@@ -273,26 +289,34 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
 
     if not "yarn-site" in configurations:
       self.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    # The properties below should always be present, as they are provided by at least the HDP206 stack advisor
+    yarnMaxAllocationSize = min(30 * int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]), int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
+    # Duplicates the Tez task resource calculation; a direct dependency on Tez is undesirable here (Hive may be deployed without Tez)
+    container_size = clusterData['mapMemory'] if clusterData['mapMemory'] > 2048 else int(clusterData['reduceMemory'])
+    container_size = min(clusterData['containers'] * clusterData['ramPerContainer'], container_size, yarnMaxAllocationSize)
 
-    if "yarn-site" in configurations and \
-                    "yarn.scheduler.minimum-allocation-mb" in configurations["yarn-site"]["properties"]:
-      container_size = configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]
     putHiveSiteProperty("hive.tez.container.size", container_size)
     putHiveSiteProperty("hive.prewarm.enabled", "false")
     putHiveSiteProperty("hive.prewarm.numcontainers", "3")
     putHiveSiteProperty("hive.tez.auto.reducer.parallelism", "true")
     putHiveSiteProperty("hive.tez.dynamic.partition.pruning", "true")
 
+    container_size = configurations["hive-site"]["properties"]["hive.tez.container.size"]
+    container_size_bytes = int(container_size)*1024*1024
+    # Memory
+    putHiveSiteProperty("hive.auto.convert.join.noconditionaltask.size", int(container_size_bytes/3))
+    putHiveSitePropertyAttribute("hive.auto.convert.join.noconditionaltask.size", "maximum", container_size_bytes)
+    putHiveSiteProperty("hive.exec.reducers.bytes.per.reducer", "67108864")
+
     # CBO
     putHiveEnvProperty("cost_based_optimizer", "On")
     if str(configurations["hive-env"]["properties"]["cost_based_optimizer"]).lower() == "on":
       putHiveSiteProperty("hive.cbo.enable", "true")
-      putHiveSiteProperty("hive.stats.fetch.partition.stats", "true")
-      putHiveSiteProperty("hive.stats.fetch.column.stats", "true")
     else:
       putHiveSiteProperty("hive.cbo.enable", "false")
-      putHiveSiteProperty("hive.stats.fetch.partition.stats", "false")
-      putHiveSiteProperty("hive.stats.fetch.column.stats", "false")
+    hive_cbo_enable = configurations["hive-site"]["properties"]["hive.cbo.enable"]
+    putHiveSiteProperty("hive.stats.fetch.partition.stats", hive_cbo_enable)
+    putHiveSiteProperty("hive.stats.fetch.column.stats", hive_cbo_enable)
     putHiveSiteProperty("hive.compute.query.using.stats ", "true")
 
     # Interactive Query
@@ -303,7 +327,7 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
 
     yarn_queues = "default"
     if "capacity-scheduler" in configurations and \
-                    "yarn.scheduler.capacity.root.queues" in configurations["capacity-scheduler"]["properties"]:
+      "yarn.scheduler.capacity.root.queues" in configurations["capacity-scheduler"]["properties"]:
       yarn_queues = str(configurations["capacity-scheduler"]["properties"]["yarn.scheduler.capacity.root.queues"])
     putHiveServerProperty("hive.server2.tez.default.queues", yarn_queues)
 
@@ -321,19 +345,67 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
     else:
       putHiveSiteProperty("hive.security.authorization.enabled", "true")
 
-    if str(configurations["hive-env"]["properties"]["hive_security_authorization"]).lower() == "sqlstdauth":
+    try:
       auth_manager_value = str(configurations["hive-env"]["properties"]["hive.security.metastore.authorization.manager"])
-      sqlstdauth_class = "org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly"
+    except KeyError:
+      auth_manager_value = ''
+    sqlstdauth_class = "org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly"
+
+    if str(configurations["hive-env"]["properties"]["hive_security_authorization"]).lower() == "sqlstdauth":
       if sqlstdauth_class not in auth_manager_value:
         putHiveSiteProperty("hive.security.metastore.authorization.manager", auth_manager_value + "," + sqlstdauth_class)
+    elif auth_manager_value != '':
+      # Remove the SQL-standard authorizer class from the comma-separated list
+      auth_manager_values = auth_manager_value.split(",")
+      auth_manager_values = [x for x in auth_manager_values if x != sqlstdauth_class]
+      putHiveSiteProperty("hive.security.metastore.authorization.manager", ",".join(auth_manager_values))
 
     putHiveServerProperty("hive.server2.enable.doAs", "true")
     putHiveSiteProperty("hive.server2.use.SSL", "false")
 
+    #Hive authentication
+    hive_server2_auth = None
+    if "hive-site" in services["configurations"] and "hive.server2.authentication" in services["configurations"]["hive-site"]["properties"]:
+      hive_server2_auth = str(services["configurations"]["hive-site"]["properties"]["hive.server2.authentication"]).lower()
+    elif "hive.server2.authentication" in configurations["hive-site"]["properties"]:
+      hive_server2_auth = str(configurations["hive-site"]["properties"]["hive.server2.authentication"]).lower()
+
+    if hive_server2_auth == "ldap":
+      putHiveSiteProperty("hive.server2.authentication.ldap.url", "")
+      putHiveSiteProperty("hive.server2.authentication.ldap.baseDN", " ")
+    else:
+      putHiveSitePropertyAttribute("hive.server2.authentication.ldap.url", "delete", "true")
+      putHiveSitePropertyAttribute("hive.server2.authentication.ldap.baseDN", "delete", "true")
+
+    if hive_server2_auth == "kerberos":
+      putHiveSiteProperty("hive.server2.authentication.kerberos.keytab", "")
+      putHiveSiteProperty("hive.server2.authentication.kerberos.principal", "")
+    else:
+      putHiveSitePropertyAttribute("hive.server2.authentication.kerberos.keytab", "delete", "true")
+      putHiveSitePropertyAttribute("hive.server2.authentication.kerberos.principal", "delete", "true")
+
+    if hive_server2_auth == "pam":
+      putHiveSiteProperty("hive.server2.authentication.pam.services", "")
+    else:
+      putHiveSitePropertyAttribute("hive.server2.authentication.pam.services", "delete", "true")
+
+    if hive_server2_auth == "custom":
+      putHiveSiteProperty("hive.server2.custom.authentication.class", "")
+    else:
+      putHiveSitePropertyAttribute("hive.server2.custom.authentication.class", "delete", "true")
+
+
   def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):
     super(HDPWIN22StackAdvisor, self).recommendHbaseEnvConfigurations(configurations, clusterData, services, hosts)
     putHbaseEnvPropertyAttributes = self.putPropertyAttribute(configurations, "hbase-env")
 
+    hmaster_host = self.getHostWithComponent("HBASE", "HBASE_MASTER", services, hosts)
+    if hmaster_host is not None:
+      host_ram = hmaster_host["Hosts"]["total_mem"]
+      putHbaseEnvPropertyAttributes('hbase_master_heapsize', 'maximum', max(1024, int(host_ram/1024)))
+
     rs_hosts = self.getHostsWithComponent("HBASE", "HBASE_REGIONSERVER", services, hosts)
     if rs_hosts is not None and len(rs_hosts) > 0:
       min_ram = rs_hosts[0]["Hosts"]["total_mem"]
@@ -344,13 +416,26 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
       putHbaseEnvPropertyAttributes('hbase_regionserver_heapsize', 'maximum', max(1024, int(min_ram*0.8/1024)))
 
     putHbaseSiteProperty = self.putProperty(configurations, "hbase-site", services)
-    putHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", '0.4')
+    putHbaseSitePropertyAttributes = self.putPropertyAttribute(configurations, "hbase-site")
+    putHbaseSiteProperty("hbase.regionserver.global.memstore.size", '0.4')
 
     if 'hbase-env' in services['configurations'] and 'phoenix_sql_enabled' in services['configurations']['hbase-env']['properties']:
       if 'true' == services['configurations']['hbase-env']['properties']['phoenix_sql_enabled'].lower():
+        putHbaseSiteProperty("hbase.region.server.rpc.scheduler.factory.class", "org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory")
+        putHbaseSiteProperty("hbase.rpc.controllerfactory.class", "org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory")
         putHbaseSiteProperty("hbase.regionserver.wal.codec", 'org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec')
       else:
         putHbaseSiteProperty("hbase.regionserver.wal.codec", 'org.apache.hadoop.hbase.regionserver.wal.WALCellCodec')
+        putHbaseSitePropertyAttributes('hbase.region.server.rpc.scheduler.factory.class', 'delete', 'true')
+        putHbaseSitePropertyAttributes('hbase.rpc.controllerfactory.class', 'delete', 'true')
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if 'ranger-hbase-plugin-properties' in services['configurations'] and ('ranger-hbase-plugin-enabled' in services['configurations']['ranger-hbase-plugin-properties']['properties']):
+      rangerPluginEnabled = services['configurations']['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled']
+      if ("RANGER" in servicesList) and (rangerPluginEnabled.lower() == "Yes".lower()):
+        putHbaseSiteProperty("hbase.security.authorization", 'true')
+        putHbaseSiteProperty("hbase.coprocessor.master.classes", 'com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor')
+        putHbaseSiteProperty("hbase.coprocessor.region.classes", 'com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor')
 
     # Recommend configs for bucket cache
     threshold = 23 # 2 Gb is reserved for other offheap memory
@@ -372,7 +457,7 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
       # Set values in hbase-site
       putHbaseProperty = self.putProperty(configurations, "hbase-site", services)
       putHbaseProperty('hfile.block.cache.size', hfile_block_cache_size)
-      putHbaseProperty('hbase.regionserver.global.memstore.upperLimit', hbase_regionserver_global_memstore_size)
+      putHbaseProperty('hbase.regionserver.global.memstore.size', hbase_regionserver_global_memstore_size)
       putHbaseProperty('hbase.bucketcache.ioengine', 'offheap')
       putHbaseProperty('hbase.bucketcache.size', hbase_bucketcache_size)
       putHbaseProperty('hbase.bucketcache.percentage.in.combinedcache', hbase_bucketcache_percentage_in_combinedcache_str)
@@ -391,12 +476,57 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
       putHbaseEnvProperty = self.putProperty(configurations, "hbase-env", services)
       putHbaseEnvProperty('hbase_max_direct_memory_size', '')
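The bucket-cache branch above only triggers on hosts with enough RAM; the exact sizing formulas sit in the elided part of this hunk, so the sketch below shows only the shape of the decision (the placeholder fractions are illustrative, not the advisor's actual formulas):

# Illustrative sketch only; the real sizing math is in the elided hunk lines.
def bucket_cache_recommendation(host_ram_gb, threshold_gb=23):
    if host_ram_gb < threshold_gb:
        # Not enough memory for an off-heap cache: clear the direct-memory cap.
        return {'hbase_max_direct_memory_size': ''}
    return {
        'hbase.bucketcache.ioengine': 'offheap',          # enable off-heap cache
        'hfile.block.cache.size': 0.4,                    # placeholder fraction
        'hbase.regionserver.global.memstore.size': 0.4,   # placeholder fraction
    }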
 
+    # Authorization
+    # A value already present in configurations takes priority, since it was just calculated; otherwise fall back to the service's configurations.
+    hbase_security_authorization = None
+    if 'hbase-site' in configurations and 'hbase.security.authorization' in configurations['hbase-site']['properties']:
+      hbase_security_authorization = configurations['hbase-site']['properties']['hbase.security.authorization']
+    elif 'hbase-site' in services['configurations'] and 'hbase.security.authorization' in services['configurations']['hbase-site']['properties']:
+      hbase_security_authorization = services['configurations']['hbase-site']['properties']['hbase.security.authorization']
+    if hbase_security_authorization:
+      if 'true' == hbase_security_authorization.lower():
+        putHbaseProperty('hbase.coprocessor.master.classes', "org.apache.hadoop.hbase.security.access.AccessController")
+        putHbaseProperty('hbase.coprocessor.region.classes', "org.apache.hadoop.hbase.security.access.AccessController")
+        putHbaseProperty('hbase.coprocessor.regionserver.classes', "org.apache.hadoop.hbase.security.access.AccessController")
+      else:
+        putHbaseProperty('hbase.coprocessor.master.classes', "")
+        putHbaseProperty('hbase.coprocessor.region.classes', "")
+        putHbaseSitePropertyAttributes('hbase.coprocessor.regionserver.classes', 'delete', 'true')
+    else:
+      putHbaseSitePropertyAttributes('hbase.coprocessor.regionserver.classes', 'delete', 'true')
+
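The configurations-then-services lookup order used above recurs throughout this advisor; a small helper capturing it (a sketch, not part of the patch):

# Freshly calculated values in `configurations` win; otherwise fall back to
# the values already present in services['configurations'].
def effective_property(configurations, services, config_type, name):
    for source in (configurations, services['configurations']):
        if config_type in source and name in source[config_type]['properties']:
            return source[config_type]['properties'][name]
    return None

# e.g. effective_property(configurations, services,
#                         'hbase-site', 'hbase.security.authorization')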
+    # Authentication
+    if 'hbase-site' in services['configurations'] and 'hbase.security.authentication' in services['configurations']['hbase-site']['properties']:
+      hbase_coprocessor_region_classes = None
+      if 'hbase.coprocessor.region.classes' in configurations["hbase-site"]["properties"]:
+        hbase_coprocessor_region_classes = configurations["hbase-site"]["properties"]["hbase.coprocessor.region.classes"].strip()
+      elif 'hbase.coprocessor.region.classes' in services['configurations']["hbase-site"]["properties"]:
+        hbase_coprocessor_region_classes = services['configurations']["hbase-site"]["properties"]["hbase.coprocessor.region.classes"].strip()
+      if hbase_coprocessor_region_classes:
+        coprocessorRegionClassList = hbase_coprocessor_region_classes.split(',')
+      else:
+        coprocessorRegionClassList = []
+      if 'kerberos' == services['configurations']['hbase-site']['properties']['hbase.security.authentication'].lower():
+        if 'org.apache.hadoop.hbase.security.token.TokenProvider' not in coprocessorRegionClassList:
+          coprocessorRegionClassList.append('org.apache.hadoop.hbase.security.token.TokenProvider')
+          putHbaseProperty('hbase.coprocessor.region.classes', ','.join(coprocessorRegionClassList))
+      else:
+        if 'org.apache.hadoop.hbase.security.token.TokenProvider' in coprocessorRegionClassList:
+          coprocessorRegionClassList.remove('org.apache.hadoop.hbase.security.token.TokenProvider')
+          putHbaseProperty('hbase.coprocessor.region.classes', ','.join(coprocessorRegionClassList))
+
+
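The Kerberos branch above performs an idempotent add/remove on the coprocessor list; isolated, the operation looks like this (sketch):

# Idempotent toggle of the TokenProvider coprocessor in a comma-separated list.
TOKEN_PROVIDER = 'org.apache.hadoop.hbase.security.token.TokenProvider'

def toggle_token_provider(coprocessors, kerberos_enabled):
    classes = [c for c in coprocessors.split(',') if c] if coprocessors else []
    if kerberos_enabled and TOKEN_PROVIDER not in classes:
        classes.append(TOKEN_PROVIDER)
    elif not kerberos_enabled and TOKEN_PROVIDER in classes:
        classes.remove(TOKEN_PROVIDER)
    return ','.join(classes)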
   def recommendTezConfigurations(self, configurations, clusterData, services, hosts):
+    if not "yarn-site" in configurations:
+      self.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    # The properties below should always be present, since they are provided by the HDP206 stack advisor.
+    yarnMaxAllocationSize = min(30 * int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]), int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
+
     putTezProperty = self.putProperty(configurations, "tez-site")
     putTezProperty("tez.am.resource.memory.mb", int(clusterData['amMemory']) * 2 if int(clusterData['amMemory']) < 3072 else int(clusterData['amMemory']))
 
     taskResourceMemory = clusterData['mapMemory'] if clusterData['mapMemory'] > 2048 else int(clusterData['reduceMemory'])
-    taskResourceMemory = min(clusterData['containers'] * clusterData['ramPerContainer'], taskResourceMemory)
+    taskResourceMemory = min(clusterData['containers'] * clusterData['ramPerContainer'], taskResourceMemory, yarnMaxAllocationSize)
     putTezProperty("tez.task.resource.memory.mb", taskResourceMemory)
     putTezProperty("tez.runtime.io.sort.mb", min(int(taskResourceMemory * 0.4), 2047))
     putTezProperty("tez.runtime.unordered.output.buffer.size-mb", int(taskResourceMemory * 0.075))
@@ -406,7 +536,10 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
     childValidators = {
       "HDFS": {"hdfs-site": self.validateHDFSConfigurations,
                "hadoop-env": self.validateHDFSConfigurationsEnv},
-      "HIVE": {"hive-site": self.validateHiveConfigurations},
+      "YARN": {"yarn-env": self.validateYARNEnvConfigurations},
+      "HIVE": {"hiveserver2-site": self.validateHiveServer2Configurations,
+               "hive-site": self.validateHiveConfigurations,
+               "hive-env": self.validateHiveConfigurationsEnv},
       "HBASE": {"hbase-site": self.validateHBASEConfigurations,
                 "hbase-env": self.validateHBASEEnvConfigurations},
       "MAPREDUCE2": {"mapred-site": self.validateMapReduce2Configurations},
@@ -415,6 +548,62 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
     parentValidators.update(childValidators)
     return parentValidators
 
+  def validateTezConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = [ {"config-name": 'tez.am.resource.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.am.resource.memory.mb')},
+                        {"config-name": 'tez.task.resource.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.task.resource.memory.mb')},
+                        {"config-name": 'tez.runtime.io.sort.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.runtime.io.sort.mb')},
+                        {"config-name": 'tez.runtime.unordered.output.buffer.size-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.runtime.unordered.output.buffer.size-mb')},]
+    return self.toConfigurationValidationProblems(validationItems, "tez-site")
+
+  def recommendMapReduce2Configurations(self, configurations, clusterData, services, hosts):
+    self.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    putMapredProperty = self.putProperty(configurations, "mapred-site", services)
+    nodemanagerMinRam = 1048576 # 1TB in mb
+    for nodemanager in self.getHostsWithComponent("YARN", "NODEMANAGER", services, hosts):
+      nodemanagerMinRam = min(nodemanager["Hosts"]["total_mem"]/1024, nodemanagerMinRam)
+    putMapredProperty('yarn.app.mapreduce.am.resource.mb', configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"])
+    putMapredProperty('yarn.app.mapreduce.am.command-opts', "-Xmx" + str(int(0.8 * int(configurations["mapred-site"]["properties"]["yarn.app.mapreduce.am.resource.mb"]))) + "m" + " -Dhdp.version=${hdp.version}")
+    putMapredProperty('mapreduce.reduce.memory.mb', min(2*int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]), int(nodemanagerMinRam)))
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    min_mapreduce_map_memory_mb = 0
+    min_mapreduce_map_java_opts = 0
+    if ("PIG" in servicesList):
+      min_mapreduce_map_memory_mb = 1500
+      min_mapreduce_map_java_opts = 1024
+    putMapredProperty('mapreduce.map.memory.mb', max(min_mapreduce_map_memory_mb, int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"])))
+    mapredMapXmx = int(0.8*int(configurations["mapred-site"]["properties"]["mapreduce.map.memory.mb"]))
+    putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(max(min_mapreduce_map_java_opts, mapredMapXmx)) + "m")
+    putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(0.8*int(configurations["mapred-site"]["properties"]["mapreduce.reduce.memory.mb"]))) + "m")
+    putMapredProperty('mapreduce.task.io.sort.mb', str(min(int(0.7*mapredMapXmx), 2047)))
+    # Property Attributes
+    putMapredPropertyAttribute = self.putPropertyAttribute(configurations, "mapred-site")
+    yarnMinAllocationSize = int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"])
+    yarnMaxAllocationSize = min(30 * int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]), int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
+    putMapredPropertyAttribute("mapreduce.map.memory.mb", "maximum", yarnMaxAllocationSize)
+    putMapredPropertyAttribute("mapreduce.map.memory.mb", "minimum", yarnMinAllocationSize)
+    putMapredPropertyAttribute("mapreduce.reduce.memory.mb", "maximum", yarnMaxAllocationSize)
+    putMapredPropertyAttribute("mapreduce.reduce.memory.mb", "minimum", yarnMinAllocationSize)
+    putMapredPropertyAttribute("yarn.app.mapreduce.am.resource.mb", "maximum", yarnMaxAllocationSize)
+    putMapredPropertyAttribute("yarn.app.mapreduce.am.resource.mb", "minimum", yarnMinAllocationSize)
+    # Hadoop MR limitation
+    putMapredPropertyAttribute("mapreduce.task.io.sort.mb", "maximum", "2047")
+
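With illustrative inputs (yarn.scheduler.minimum-allocation-mb = 1024, maximum = 8192, a 16 GB smallest NodeManager host, and PIG installed), the method above yields:

yarn_min, yarn_max_alloc = 1024, 8192
nodemanager_min_ram_mb = 16384
am_resource_mb = yarn_min                                # 1024
am_opts_xmx = int(0.8 * am_resource_mb)                  # 819  -> -Xmx819m
reduce_mb = min(2 * yarn_min, nodemanager_min_ram_mb)    # 2048
map_mb = max(1500, yarn_min)                             # 1500 (PIG floor)
map_xmx = int(0.8 * map_mb)                              # 1200
map_opts_xmx = max(1024, map_xmx)                        # 1200 -> -Xmx1200m
io_sort_mb = min(int(0.7 * map_xmx), 2047)               # 840
attr_maximum = min(30 * yarn_min, yarn_max_alloc)        # 8192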
+  def validateMapReduce2Configurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = [ {"config-name": 'mapreduce.map.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.map.java.opts')},
+                        {"config-name": 'mapreduce.reduce.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.reduce.java.opts')},
+                        {"config-name": 'mapreduce.task.io.sort.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.task.io.sort.mb')},
+                        {"config-name": 'mapreduce.map.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.map.memory.mb')},
+                        {"config-name": 'mapreduce.reduce.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.reduce.memory.mb')},
+                        {"config-name": 'yarn.app.mapreduce.am.resource.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.resource.mb')},
+                        {"config-name": 'yarn.app.mapreduce.am.command-opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.command-opts')}]
+    return self.toConfigurationValidationProblems(validationItems, "mapred-site")
+
+  def validateHDFSConfigurationsEnv(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = [ {"config-name": 'namenode_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_heapsize')},
+                        {"config-name": 'namenode_opt_newsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_newsize')},
+                        {"config-name": 'namenode_opt_maxnewsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_maxnewsize')}]
+    return self.toConfigurationValidationProblems(validationItems, "hadoop-env")
+  
   def validateHDFSConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     # We can not access property hadoop.security.authentication from the
     # other config (core-site). That's why we are using another heuristics here
@@ -435,9 +624,40 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
     VALID_TRANSFER_PROTECTION_VALUES = ['authentication', 'integrity', 'privacy']
 
     validationItems = []
+    address_properties = [
+      # "dfs.datanode.address",
+      # "dfs.datanode.http.address",
+      # "dfs.datanode.https.address",
+      # "dfs.datanode.ipc.address",
+      # "dfs.journalnode.http-address",
+      # "dfs.journalnode.https-address",
+      # "dfs.namenode.rpc-address",
+      # "dfs.namenode.secondary.http-address",
+      "dfs.namenode.http-address",
+      "dfs.namenode.https-address",
+    ]
+    # Validate the *-address properties for well-formed host:port values
+
+    for address_property in address_properties:
+      if address_property in hdfs_site:
+        value = hdfs_site[address_property]
+        if not is_valid_host_port_authority(value):
+          validationItems.append({"config-name" : address_property, "item" :
+            self.getErrorItem(address_property + " does not contain a valid host:port authority: " + value)})
+
+    # Ranger plugin validation
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hdfs-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hdfs-plugin-enabled'] if ranger_plugin_properties else 'No'
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'Yes'.lower()):
+      if hdfs_site['dfs.permissions.enabled'] != 'true':
+        validationItems.append({"config-name": 'dfs.permissions.enabled',
+                                    "item": self.getWarnItem(
+                                      "dfs.permissions.enabled needs to be set to true if Ranger HDFS Plugin is enabled.")})
+
     if (not wire_encryption_enabled and   # If wire encryption is enabled at Hadoop, it disables all our checks
-            core_site['hadoop.security.authentication'] == 'kerberos' and
-            core_site['hadoop.security.authorization'] == 'true'):
+          core_site['hadoop.security.authentication'] == 'kerberos' and
+          core_site['hadoop.security.authorization'] == 'true'):
       # security is enabled
 
       dfs_http_policy = 'dfs.http.policy'
@@ -447,15 +667,15 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
       data_transfer_protection = 'dfs.data.transfer.protection'
 
       try: # Params may be absent
-        privileged_dfs_dn_port = (False, True)[getPort(hdfs_site[dfs_datanode_address]) is not None]
+        privileged_dfs_dn_port = isSecurePort(getPort(hdfs_site[dfs_datanode_address]))
       except KeyError:
         privileged_dfs_dn_port = False
       try:
-        privileged_dfs_http_port = (False, True)[getPort(hdfs_site[datanode_http_address]) is not None]
+        privileged_dfs_http_port = isSecurePort(getPort(hdfs_site[datanode_http_address]))
       except KeyError:
         privileged_dfs_http_port = False
       try:
-        privileged_dfs_https_port = (False, True)[getPort(hdfs_site[datanode_https_address]) is not None]
+        privileged_dfs_https_port = isSecurePort(getPort(hdfs_site[datanode_https_address]))
       except KeyError:
         privileged_dfs_https_port = False
       try:
@@ -498,7 +718,7 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
                     "In a secure cluster, Datanode forbids using non-secure ports " \
                     "if {0} is not set to {3}. " \
                     "Please make sure that properties {2} use secure ports.".format(
-            dfs_http_policy, dfs_http_policy_value, important_properties, HTTPS_ONLY)
+                      dfs_http_policy, dfs_http_policy_value, important_properties, HTTPS_ONLY)
           address_properties_with_warnings.extend(important_properties)
 
       # Generate port-related warnings if any
@@ -513,7 +733,7 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
                                   "item": self.getWarnItem(
                                     "{0} property can not be used when {1} is set to any "
                                     "value other then {2}. Tip: When {1} property is not defined, it defaults to {3}".format(
-                                      data_transfer_protection, dfs_http_policy, HTTPS_ONLY, HTTP_ONLY))})
+                                    data_transfer_protection, dfs_http_policy, HTTPS_ONLY, HTTP_ONLY))})
         elif not data_transfer_protection_value in VALID_TRANSFER_PROTECTION_VALUES:
           validationItems.append({"config-name": data_transfer_protection,
                                   "item": self.getWarnItem(
@@ -521,59 +741,189 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
                                       data_transfer_protection_value, VALID_TRANSFER_PROTECTION_VALUES))})
     return self.toConfigurationValidationProblems(validationItems, "hdfs-site")
 
-  def validateHDFSConfigurationsEnv(self, properties, recommendedDefaults, configurations, services, hosts):
-    validationItems = [ {"config-name": 'namenode_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_heapsize')},
-                        {"config-name": 'namenode_opt_newsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_newsize')},
-                        {"config-name": 'namenode_opt_maxnewsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_maxnewsize')}]
-    return self.toConfigurationValidationProblems(validationItems, "hadoop-env")
+  def validateHiveServer2Configurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    super(HDPWIN22StackAdvisor, self).validateHiveConfigurations(properties, recommendedDefaults, configurations, services, hosts)
+    hive_server2 = properties
+    validationItems = [] 
+    #Adding Ranger Plugin logic here 
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hive-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hive-plugin-enabled'] if ranger_plugin_properties else 'No'
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    ##Add stack validations only if Ranger is enabled.
+    if ("RANGER" in servicesList):
+      ## Add stack validations for Ranger plugin enabled.
+      if (ranger_plugin_enabled.lower() == 'Yes'.lower()):
+        prop_name = 'hive.security.authorization.manager'
+        prop_val = "com.xasecure.authorization.hive.authorizer.XaSecureHiveAuthorizerFactory"
+        if hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger Hive Plugin is enabled,"\
+                                  " {0} needs to be set to {1}".format(prop_name,prop_val))})
+        prop_name = 'hive.security.authenticator.manager'
+        prop_val = "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator"
+        if hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger Hive Plugin is enabled,"\
+                                  " {0} needs to be set to {1}".format(prop_name,prop_val))})
+      ## Add stack validations for Ranger plugin disabled.
+      elif (ranger_plugin_enabled.lower() == 'No'.lower()):
+        prop_name = 'hive.security.authorization.manager'
+        prop_val = "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"
+        if hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger Hive Plugin is disabled,"\
+                                  " {0} needs to be set to {1}".format(prop_name,prop_val))})
+        prop_name = 'hive.security.authenticator.manager'
+        prop_val = "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator"
+        if hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger Hive Plugin is disabled,"\
+                                  " {0} needs to be set to {1}".format(prop_name,prop_val))})
+    return self.toConfigurationValidationProblems(validationItems, "hiveserver2-site")
+
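The enabled/disabled branches above compare the same two hiveserver2-site properties against different expected values; restated as a table-driven sketch (not the advisor's actual code):

EXPECTED = {
    'yes': {  # Ranger Hive plugin enabled
        'hive.security.authorization.manager':
            'com.xasecure.authorization.hive.authorizer.XaSecureHiveAuthorizerFactory',
        'hive.security.authenticator.manager':
            'org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator',
    },
    'no': {   # Ranger Hive plugin disabled
        'hive.security.authorization.manager':
            'org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory',
        'hive.security.authenticator.manager':
            'org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator',
    },
}

def ranger_hive_mismatches(hive_server2, plugin_enabled):
    expected = EXPECTED.get(plugin_enabled.lower(), {})
    return [(name, value) for name, value in expected.items()
            if hive_server2.get(name) != value]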
+  def validateHiveConfigurationsEnv(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    hive_env = properties
+    hive_site = getSiteProperties(configurations, "hive-site")
+    if str(hive_env["hive_security_authorization"]).lower() == "none" \
+      and str(hive_site["hive.security.authorization.enabled"]).lower() == "true":
+      authorization_item = self.getErrorItem("hive_security_authorization should not be None "
+                                             "if hive.security.authorization.enabled is set to true")
+      validationItems.append({"config-name": "hive_security_authorization", "item": authorization_item})
+
+    return self.toConfigurationValidationProblems(validationItems, "hive-env")
 
   def validateHiveConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     super(HDPWIN22StackAdvisor, self).validateHiveConfigurations(properties, recommendedDefaults, configurations, services, hosts)
+    hive_site = properties
     validationItems = []
+    #Adding Ranger Plugin logic here
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hive-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hive-plugin-enabled'] if ranger_plugin_properties else 'No'
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    ##Add stack validations only if Ranger is enabled.
+    if ("RANGER" in servicesList):
+      ## Add stack validations for Ranger plugin enabled.
+      if (ranger_plugin_enabled.lower() == 'Yes'.lower()):
+        prop_name = 'hive.security.authorization.enabled'
+        prop_val = 'true'
+        if hive_site[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                    "If Ranger Hive Plugin is enabled," \
+                                    " {0} needs to be set to {1}".format(prop_name,prop_val))})
+
+        prop_name = 'hive.conf.restricted.list'
+        prop_vals = 'hive.security.authorization.enabled,hive.security.authorization.manager,hive.security.authenticator.manager'.split(',')
+        current_vals = hive_site[prop_name].split(',') if prop_name in hive_site else []
+        missing_vals = []
+
+        for val in prop_vals:
+          if not val in current_vals:
+            missing_vals.append(val)
+
+        if missing_vals:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger Hive Plugin is enabled," \
+                                  " {0} needs to contain {1}".format(prop_name, ','.join(missing_vals)))})
     stripe_size_values = [8388608, 16777216, 33554432, 67108864, 134217728, 268435456]
     stripe_size_property = "hive.exec.orc.default.stripe.size"
     if int(properties[stripe_size_property]) not in stripe_size_values:
       validationItems.append({"config-name": stripe_size_property, "item": self.getWarnItem("Correct values are ")})
     return self.toConfigurationValidationProblems(validationItems, "hive-site")
 
-  def validateTezConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    validationItems = [ {"config-name": 'tez.am.resource.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.am.resource.memory.mb')},
-                        {"config-name": 'tez.task.resource.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.task.resource.memory.mb')},
-                        {"config-name": 'tez.runtime.io.sort.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.runtime.io.sort.mb')},
-                        {"config-name": 'tez.runtime.unordered.output.buffer.size-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.runtime.unordered.output.buffer.size-mb')},]
-    return self.toConfigurationValidationProblems(validationItems, "tez-site")
-
-  def validateMapReduce2Configurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    validationItems = [ {"config-name": 'mapreduce.map.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.map.java.opts')},
-                        {"config-name": 'mapreduce.reduce.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.reduce.java.opts')},
-                        {"config-name": 'mapreduce.task.io.sort.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.task.io.sort.mb')},
-                        {"config-name": 'mapreduce.map.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.map.memory.mb')},
-                        {"config-name": 'mapreduce.reduce.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.reduce.memory.mb')},
-                        {"config-name": 'yarn.app.mapreduce.am.resource.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.resource.mb')},
-                        {"config-name": 'yarn.app.mapreduce.am.command-opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.command-opts')}]
-    return self.toConfigurationValidationProblems(validationItems, "mapred-site")
-
   def validateHBASEConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     super(HDPWIN22StackAdvisor, self).validateHbaseEnvConfigurations(properties, recommendedDefaults, configurations, services, hosts)
     hbase_site = properties
     validationItems = []
 
-    prop_name1 = 'hbase.regionserver.global.memstore.upperLimit'
+    prop_name1 = 'hbase.regionserver.global.memstore.size'
     prop_name2 = 'hfile.block.cache.size'
     props_max_sum = 0.8
 
     if not is_number(hbase_site[prop_name1]):
       validationItems.append({"config-name": prop_name1,
                               "item": self.getWarnItem(
-                                "{0} should be float value".format(prop_name1))})
+                              "{0} should be float value".format(prop_name1))})
     elif not is_number(hbase_site[prop_name2]):
       validationItems.append({"config-name": prop_name2,
                               "item": self.getWarnItem(
-                                "{0} should be float value".format(prop_name2))})
+                              "{0} should be float value".format(prop_name2))})
     elif float(hbase_site[prop_name1]) + float(hbase_site[prop_name2]) > props_max_sum:
       validationItems.append({"config-name": prop_name1,
                               "item": self.getWarnItem(
-                                "{0} and {1} sum should not exceed {2}".format(prop_name1, prop_name2, props_max_sum))})
+                              "{0} and {1} sum should not exceed {2}".format(prop_name1, prop_name2, props_max_sum))})
+
+    #Adding Ranger Plugin logic here 
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hbase-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hbase-plugin-enabled'] if ranger_plugin_properties else 'No'
+    prop_name = 'hbase.security.authorization'
+    prop_val = "true"
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'Yes'.lower()):
+      if hbase_site[prop_name] != prop_val:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                "If Ranger HBase Plugin is enabled,"\
+                                " {0} needs to be set to {1}".format(prop_name,prop_val))})
+      prop_name = "hbase.coprocessor.master.classes"
+      prop_val = "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor"
+      exclude_val = "org.apache.hadoop.hbase.security.access.AccessController"
+      if (prop_val in hbase_site[prop_name] and exclude_val not in hbase_site[prop_name]):
+        pass
+      else:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                "If Ranger HBase Plugin is enabled,"\
+                                " {0} needs to contain {1} instead of {2}".format(prop_name,prop_val,exclude_val))})
+      prop_name = "hbase.coprocessor.region.classes"
+      prop_val = "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor"
+      if (prop_val in hbase_site[prop_name] and exclude_val not in hbase_site[prop_name]):
+        pass
+      else:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                "If Ranger HBase Plugin is enabled,"\
+                                " {0} needs to contain {1} instead of {2}".format(prop_name,prop_val,exclude_val))})
+
+    # Validate bucket cache correct config
+    prop_name = "hbase.bucketcache.ioengine"
+    prop_val = "offheap"
+    if hbase_site.get(prop_name) and hbase_site[prop_name] != prop_val:
+      validationItems.append({"config-name": prop_name,
+                              "item": self.getWarnItem(
+                                "Recommended value of" \
+                                " {0} is empty or '{1}'".format(prop_name,prop_val))})
+
+    prop_name1 = "hbase.bucketcache.ioengine"
+    prop_name2 = "hbase.bucketcache.size"
+    prop_name3 = "hbase.bucketcache.percentage.in.combinedcache"
+
+    if hbase_site.get(prop_name1) and not hbase_site.get(prop_name2):
+      validationItems.append({"config-name": prop_name2,
+                              "item": self.getWarnItem(
+                                "If bucketcache ioengine is enabled, {0} should be set".format(prop_name2))})
+    if hbase_site.get(prop_name1) and not hbase_site.get(prop_name3):
+      validationItems.append({"config-name": prop_name3,
+                              "item": self.getWarnItem(
+                                "If bucketcache ioengine is enabled, {0} should be set".format(prop_name3))})
+
+    # Validate hbase.security.authentication. 
+    # Kerberos works only when security enabled.
+    if "hbase.security.authentication" in properties:
+      hbase_security_kerberos = properties["hbase.security.authentication"].lower() == "kerberos"
+      core_site_properties = getSiteProperties(configurations, "core-site")
+      security_enabled = False
+      if core_site_properties:
+        security_enabled = core_site_properties['hadoop.security.authentication'] == 'kerberos' and core_site_properties['hadoop.security.authorization'] == 'true'
+      if not security_enabled and hbase_security_kerberos:
+        validationItems.append({"config-name": "hbase.security.authentication",
+                              "item": self.getWarnItem("Cluster must be secured with Kerberos before hbase.security.authentication's value of kerberos will have effect")})
 
     return self.toConfigurationValidationProblems(validationItems, "hbase-site")
 
@@ -581,12 +931,90 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
     hbase_env = properties
     validationItems = [ {"config-name": 'hbase_regionserver_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_regionserver_heapsize')},
                         {"config-name": 'hbase_master_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_master_heapsize')} ]
+    prop_name = "hbase_max_direct_memory_size"
+    hbase_site_properties = getSiteProperties(configurations, "hbase-site")
+    prop_name1 = "hbase.bucketcache.ioengine"
+
+    if hbase_site_properties and hbase_site_properties.get(prop_name1) == "offheap" and not hbase_env.get(prop_name):
+      validationItems.append({"config-name": prop_name,
+                              "item": self.getWarnItem(
+                                "If bucketcache ioengine is enabled, {0} should be set".format(prop_name))})
 
     return self.toConfigurationValidationProblems(validationItems, "hbase-env")
 
+  def validateYARNEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    if "yarn_cgroups_enabled" in properties:
+      yarn_cgroups_enabled = properties["yarn_cgroups_enabled"].lower() == "true"
+      core_site_properties = getSiteProperties(configurations, "core-site")
+      security_enabled = False
+      if core_site_properties:
+        security_enabled = core_site_properties['hadoop.security.authentication'] == 'kerberos' and core_site_properties['hadoop.security.authorization'] == 'true'
+      if not security_enabled and yarn_cgroups_enabled:
+        validationItems.append({"config-name": "yarn_cgroups_enabled",
+                              "item": self.getWarnItem("CPU Isolation should only be enabled if security is enabled")})
+    return self.toConfigurationValidationProblems(validationItems, "yarn-env")
+
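The security_enabled computation above also appears verbatim in validateHBASEConfigurations; a shared helper would read (sketch, not part of the patch):

def is_security_enabled(core_site):
    # Both Kerberos authentication and authorization have to be on.
    return bool(core_site) and \
        core_site.get('hadoop.security.authentication') == 'kerberos' and \
        core_site.get('hadoop.security.authorization') == 'true'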
+  def getMastersWithMultipleInstances(self):
+    result = super(HDPWIN22StackAdvisor, self).getMastersWithMultipleInstances()
+    result.extend(['METRICS_COLLECTOR'])
+    return result
+
+  def getNotValuableComponents(self):
+    result = super(HDPWIN22StackAdvisor, self).getNotValuableComponents()
+    result.extend(['METRICS_MONITOR'])
+    return result
+
+  def getNotPreferableOnServerComponents(self):
+    result = super(HDPWIN22StackAdvisor, self).getNotPreferableOnServerComponents()
+    result.extend(['METRICS_COLLECTOR'])
+    return result
+
+  def getCardinalitiesDict(self):
+    result = super(HDPWIN22StackAdvisor, self).getCardinalitiesDict()
+    result['METRICS_COLLECTOR'] = {"min": 1}
+    return result
+
+  def getAffectedConfigs(self, services):
+    affectedConfigs = super(HDPWIN22StackAdvisor, self).getAffectedConfigs(services)
+
+    # Some configs are not defined in the stack but are added/removed by the
+    # stack advisor. Add them here so that they pass the config filtering
+    # performed in the base class.
+    configsList = [affectedConfig["type"] + "/" + affectedConfig["name"] for affectedConfig in affectedConfigs]
+    if 'yarn-env/yarn_cgroups_enabled' in configsList:
+      if 'yarn-site/yarn.nodemanager.container-executor.class' not in configsList:
+        affectedConfigs.append({"type": "yarn-site", "name": "yarn.nodemanager.container-executor.class"})
+      if 'yarn-site/yarn.nodemanager.container-executor.group' not in configsList:
+        affectedConfigs.append({"type": "yarn-site", "name": "yarn.nodemanager.container-executor.group"})
+      if 'yarn-site/yarn.nodemanager.container-executor.resources-handler.class' not in configsList:
+        affectedConfigs.append({"type": "yarn-site", "name": "yarn.nodemanager.container-executor.resources-handler.class"})
+      if 'yarn-site/yarn.nodemanager.container-executor.cgroups.hierarchy' not in configsList:
+        affectedConfigs.append({"type": "yarn-site", "name": "yarn.nodemanager.container-executor.cgroups.hierarchy"})
+      if 'yarn-site/yarn.nodemanager.container-executor.cgroups.mount' not in configsList:
+        affectedConfigs.append({"type": "yarn-site", "name": "yarn.nodemanager.container-executor.cgroups.mount"})
+      if 'yarn-site/yarn.nodemanager.linux-container-executor.cgroups.mount-path' not in configsList:
+        affectedConfigs.append({"type": "yarn-site", "name": "yarn.nodemanager.linux-container-executor.cgroups.mount-path"})
+
+    return affectedConfigs
+
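The six membership checks in getAffectedConfigs follow one pattern; a behaviorally equivalent loop (sketch):

CGROUP_DEPENDENT = [
    'yarn.nodemanager.container-executor.class',
    'yarn.nodemanager.container-executor.group',
    'yarn.nodemanager.container-executor.resources-handler.class',
    'yarn.nodemanager.container-executor.cgroups.hierarchy',
    'yarn.nodemanager.container-executor.cgroups.mount',
    'yarn.nodemanager.linux-container-executor.cgroups.mount-path',
]

def add_cgroup_configs(affectedConfigs, configsList):
    for name in CGROUP_DEPENDENT:
        if 'yarn-site/' + name not in configsList:
            affectedConfigs.append({'type': 'yarn-site', 'name': name})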
 def is_number(s):
   try:
     float(s)
     return True
   except ValueError:
-    pass
+    pass
+
+  return False
+
+def is_valid_host_port_authority(target):
+  has_scheme = "://" in target
+  if not has_scheme:
+    target = "dummyscheme://"+target
+  try:
+    result = urlparse(target)
+    if result.hostname is not None and result.port is not None:
+      return True
+  except ValueError:
+    pass
+  return False
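is_valid_host_port_authority relies on urlparse, whose import is not visible in this hunk; the cross-version import and expected behavior would be (assumed, for reference):

try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse       # Python 2

# is_valid_host_port_authority('namenode.example.com:50070') -> True
# is_valid_host_port_authority('namenode.example.com')       -> False (no port)
# is_valid_host_port_authority('host:notaport')              -> False (ValueError)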