
AMBARI-10646. HBase config UI: Eliminate hbase.client.scanner.caching setting (srimanth)

Srimanth Gunturi 10 years ago
parent
commit
1a02c9cce6

+ 2 - 2
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml

@@ -35,7 +35,7 @@
     <name>hbase_regionserver_heapsize</name>
     <value>4096</value>
     <description>HBase RegionServer Heap Size.</description>
-    <display-name>HBase RegionServer Maximum Memory</display-name>
+    <display-name>RegionServer Maximum Memory</display-name>
     <value-attributes>
       <type>int</type>
       <minimum>1024</minimum>
@@ -62,7 +62,7 @@ and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds this value.
     <name>hbase_master_heapsize</name>
     <value>4096</value>
     <description>HBase Master Heap Size</description>
-    <display-name>HBase Master Maximum Memory</display-name>
+    <display-name>Master Maximum Memory</display-name>
     <value-attributes>
       <type>int</type>
       <minimum>1024</minimum>

+ 11 - 9
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-site.xml

@@ -89,6 +89,7 @@
       <type>float</type>
       <minimum>0</minimum>
       <maximum>0.8</maximum>
+      <increment-step>0.01</increment-step>
     </value-attributes>
   </property>
   <property>
@@ -188,7 +189,7 @@
   </property>
   <property>
     <name>hbase.hregion.max.filesize</name>
-    <value>1073741824</value>
+    <value>10737418240</value>
     <description>
     Maximum HStoreFile size. If any one of a column families' HStoreFiles has
     grown to exceed this value, the hosting HRegion is split in two.
@@ -198,9 +199,9 @@
     <value-attributes>
       <type>int</type>
       <minimum>1073741824</minimum>
-      <maximum>10737418240</maximum>
+      <maximum>107374182400</maximum>
       <unit>B</unit>
-      <increment-step>268435456</increment-step>
+      <increment-step>1073741824</increment-step>
     </value-attributes>
   </property>
   <property>
@@ -268,7 +269,7 @@
     is run to rewrite all HStoreFiles files as one.  Larger numbers
     put off compaction but when it runs, it takes longer to complete.
     </description>
-    <display-name>Maximum Files in a Store before Compaction</display-name>
+    <display-name>Maximum Store Files before Minor Compaction</display-name>
     <value-attributes>
       <type>int</type>
       <entries>
@@ -315,6 +316,7 @@
       <type>float</type>
       <minimum>0</minimum>
       <maximum>0.8</maximum>
+      <increment-step>0.01</increment-step>
     </value-attributes>
   </property>

@@ -456,15 +458,15 @@
   </property>
   <property>
     <name>hbase.rpc.timeout</name>
-    <value>60000</value>
+    <value>90000</value>
     <description>This is for the RPC layer to define how long HBase client applications
         take for a remote call to time out. It uses pings to check connections
         but will eventually throw a TimeoutException.</description>
-    <display-name>HBase RPC Timeout</display-name>
+    <display-name>RPC Timeout</display-name>
     <value-attributes>
       <type>int</type>
-      <minimum>30000</minimum>
-      <maximum>300000</maximum>
+      <minimum>10000</minimum>
+      <maximum>180000</maximum>
       <unit>milliseconds</unit>
       <increment-step>10000</increment-step>
     </value-attributes>
@@ -482,7 +484,7 @@
     <value-attributes>
       <type>int</type>
       <minimum>30000</minimum>
-      <maximum>300000</maximum>
+      <maximum>180000</maximum>
       <unit>milliseconds</unit>
       <increment-step>10000</increment-step>
     </value-attributes>

+ 2 - 2
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml

@@ -180,11 +180,11 @@
       <entries>
         <entry>
           <value>0.99</value>
-          <label>NN HA</label>
+          <label>0.99f</label>
         </entry>
         <entry>
           <value>1.0</value>
-          <label>No NN HA</label>
+          <label>1.0f</label>
         </entry>
       </entries>
       <selection-cardinality>1</selection-cardinality>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml

@@ -60,7 +60,7 @@ and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds this value.
     <name>phoenix_sql_enabled</name>
     <value>false</value>
     <description>Enable Phoenix SQL</description>
-    <display-name>Phoenix SQL</display-name>
+    <display-name>Enable Phoenix</display-name>
     <value-attributes>
       <type>value-list</type>
       <entries>

+ 4 - 19
ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/themes/theme.json

@@ -16,7 +16,7 @@
                 "sections": [
                 "sections": [
                   {
                   {
                     "name": "section-hbase-memory",
                     "name": "section-hbase-memory",
-                    "display-name": "Memory",
+                    "display-name": "Server",
                     "row-index": "0",
                     "row-index": "0",
                     "column-index": "0",
                     "column-index": "0",
                     "row-span": "1",
                     "row-span": "1",
@@ -132,7 +132,7 @@
                   },
                   {
                     "name": "section-hbase-phoenix",
-                    "display-name": "Phoenix",
+                    "display-name": "Phoenix SQL",
                     "row-index": "2",
                     "row-index": "2",
                     "column-index": "2",
                     "column-index": "2",
                     "row-span": "1",
                     "row-span": "1",
@@ -173,7 +173,7 @@
         },
         {
           "config": "hbase-site/hbase.regionserver.global.memstore.upperLimit",
-          "subsection-name": "subsection-hbase-memory-col2"
+          "subsection-name": "subsection-hbase-memory-col1"
         },
         {
           "config": "hbase-site/hbase.hregion.memstore.flush.size",
@@ -185,11 +185,7 @@
         },
         {
           "config": "hbase-site/hbase.regionserver.handler.count",
-          "subsection-name": "subsection-hbase-client-col1"
-        },
-        {
-          "config": "hbase-site/hbase.client.scanner.caching",
-          "subsection-name": "subsection-hbase-client-col1"
+          "subsection-name": "subsection-hbase-memory-col2"
         },
         {
           "config": "hbase-site/hbase.client.retries.number",
@@ -310,17 +306,6 @@
           ]
         }
       },
-      {
-        "config":"hbase-site/hbase.client.scanner.caching",
-        "widget":{
-          "type":"slider",
-          "units":[
-            {
-              "unit-name":"int"
-            }
-          ]
-        }
-      },
       {
         "config":"hbase-site/hbase.client.retries.number",
         "widget":{

+ 12 - 5
ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py

@@ -100,6 +100,10 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     putHdfsEnvProperty = self.putProperty(configurations, "hadoop-env", services)
     putHdfsEnvPropertyAttribute = self.putPropertyAttribute(configurations, "hadoop-env")

+    putHdfsEnvProperty('namenode_heapsize', max(int(clusterData['totalAvailableRam'] / 2), 1024))
+    putHdfsEnvProperty('namenode_opt_newsize', max(int(clusterData['totalAvailableRam'] / 8), 128))
+    putHdfsEnvProperty('namenode_opt_maxnewsize', max(int(clusterData['totalAvailableRam'] / 8), 256))
+
     nn_max_heapsize=None
     if (namenodeHosts is not None and len(namenodeHosts) > 0):
       if len(namenodeHosts) > 1:
@@ -109,10 +113,13 @@ class HDP22StackAdvisor(HDP21StackAdvisor):

       putHdfsEnvPropertyAttribute('namenode_heapsize', 'maximum', nn_max_heapsize)

-    #Old fallback values
-    putHdfsEnvProperty('namenode_heapsize', max(int(clusterData['totalAvailableRam'] / 2), 1024))
-    putHdfsEnvProperty('namenode_opt_newsize', max(int(clusterData['totalAvailableRam'] / 8), 128))
-    putHdfsEnvProperty('namenode_opt_maxnewsize', max(int(clusterData['totalAvailableRam'] / 8), 256))
+      nn_heapsize = nn_max_heapsize
+      nn_heapsize -= clusterData["reservedRam"]
+      if clusterData["hBaseInstalled"]:
+        nn_heapsize -= clusterData["hbaseRam"]
+      putHdfsEnvProperty('namenode_heapsize', max(int(nn_heapsize / 2), 1024))
+      putHdfsEnvProperty('namenode_opt_newsize', max(int(nn_heapsize / 8), 128))
+      putHdfsEnvProperty('namenode_opt_maxnewsize', max(int(nn_heapsize / 8), 256))

     datanodeHosts = self.getHostsWithComponent("HDFS", "DATANODE", services, hosts)
     if datanodeHosts is not None and len(datanodeHosts) > 0:
@@ -154,7 +161,7 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
       nn_memory_config = nn_memory_configs[index]

       #override with new values if applicable
-      if nn_max_heapsize is not None and nn_max_heapsize <= nn_memory_config['nn_heap']:
+      if nn_max_heapsize is not None and nn_memory_config['nn_heap'] <= nn_max_heapsize:
         putHdfsEnvProperty('namenode_heapsize', nn_memory_config['nn_heap'])
         putHdfsEnvProperty('namenode_opt_newsize', nn_memory_config['nn_opt'])
         putHdfsEnvProperty('namenode_opt_maxnewsize', nn_memory_config['nn_opt'])
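
Note: the stack_advisor.py change applies the RAM-based fallback first and then, when NameNode hosts are known, overrides it with a host-derived value that reserves room for the OS and, when HBase is co-located, for its RAM; it also fixes the inverted comparison so a memory preset is applied only when it fits under the host maximum. A minimal standalone sketch of the revised flow, assuming the clusterData keys shown above (the function name is hypothetical):

    # Sketch of the revised NameNode heap recommendation (name hypothetical).
    def recommend_namenode_heap(cluster_data, nn_max_heapsize=None):
        total_ram = cluster_data['totalAvailableRam']
        # RAM-based fallback, applied unconditionally first.
        heap = max(int(total_ram / 2), 1024)
        new = max(int(total_ram / 8), 128)
        maxnew = max(int(total_ram / 8), 256)
        if nn_max_heapsize is not None:
            # Host-derived override: subtract the OS reservation and, if HBase
            # shares the host, its RAM, then size the heap from the remainder.
            usable = nn_max_heapsize - cluster_data['reservedRam']
            if cluster_data['hBaseInstalled']:
                usable -= cluster_data['hbaseRam']
            heap = max(int(usable / 2), 1024)
            new = max(int(usable / 8), 128)
            maxnew = max(int(usable / 8), 256)
        return heap, new, maxnew

    # Consistent with Test 3 in the updated unit test below:
    # (10240 - 128 - 111) MB usable gives a 5000 MB heap, 1250 MB new-gen.
    assert recommend_namenode_heap(
        {'totalAvailableRam': 2048, 'reservedRam': 128,
         'hBaseInstalled': True, 'hbaseRam': 111},
        nn_max_heapsize=10240) == (5000, 1250, 1250)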

+ 56 - 12
ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py

@@ -1438,7 +1438,9 @@ class TestHDP22StackAdvisor(TestCase):
     }
     clusterData = {
       "totalAvailableRam": 2048,
-      "hBaseInstalled": 111
+      "hBaseInstalled": True,
+      "hbaseRam": 111,
+      "reservedRam": 128
     }
     expected = {
       'hadoop-env': {
@@ -1449,7 +1451,7 @@ class TestHDP22StackAdvisor(TestCase):
         },
         'property_attributes': {
           'dtnode_heapsize': {'maximum': '2048'},
-          'namenode_heapsize': {'maximum': '1024'}
+          'namenode_heapsize': {'maximum': '10240'}
         }
       },
       'hdfs-site': {
@@ -1587,7 +1589,7 @@ class TestHDP22StackAdvisor(TestCase):
             "ph_cpu_count" : 1,
             "ph_cpu_count" : 1,
             "public_host_name" : "host2",
             "public_host_name" : "host2",
             "rack_info" : "/default-rack",
             "rack_info" : "/default-rack",
-            "total_mem" : 1048576
+            "total_mem" : 10485760
           }
         },
       ]
@@ -1595,9 +1597,9 @@ class TestHDP22StackAdvisor(TestCase):

     self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
     self.assertEquals(configurations, expected)
-    # namenode heapsize depends on # of datanodes
+    # Test 1 - namenode heapsize depends on # of datanodes
     datanode_hostnames = services["services"][0]["components"][0]["StackServiceComponents"]["hostnames"] # datanode hostnames
-    for i in xrange(200):
+    for i in xrange(10):
       hostname = "datanode" + `i`
       hostname = "datanode" + `i`
       datanode_hostnames.append(hostname)
       datanode_hostnames.append(hostname)
       hosts['items'].append(
       hosts['items'].append(
@@ -1616,15 +1618,57 @@ class TestHDP22StackAdvisor(TestCase):
         }
       )
     self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_heapsize"], "47872")
-    self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "6144")
-    self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "6144")
+    self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_heapsize"], "3072")
+    self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "512")
+    self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "512")
+    # Test 2 - add more datanodes
+    for i in xrange(11,30):
+      hostname = "datanode" + `i`
+      datanode_hostnames.append(hostname)
+      hosts['items'].append(
+        {
+          "href" : "/api/v1/hosts/" + hostname,
+          "Hosts" : {
+            "cpu_count" : 1,
+            "host_name" : hostname,
+            "os_arch" : "x86_64",
+            "os_type" : "centos6",
+            "ph_cpu_count" : 1,
+            "public_host_name" : hostname,
+            "rack_info" : "/default-rack",
+            "total_mem" : 2097152
+          }
+        }
+      )
     # namenode_heapsize depends on number of disks used by datanode
-    configurations["hdfs-site"]["properties"]["dfs.datanode.data.dir"] = "/path1"
+    configurations["hdfs-site"]["properties"]["dfs.datanode.data.dir"] = "/path1,/path2,/path3,/path4"
+    self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_heapsize"], "9984")
+    self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "1280")
+    self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "1280")
+    # Test 3 - more datanodes than host can handle
+    for i in xrange(31, 90):
+      hostname = "datanode" + `i`
+      datanode_hostnames.append(hostname)
+      hosts['items'].append(
+        {
+          "href" : "/api/v1/hosts/" + hostname,
+          "Hosts" : {
+            "cpu_count" : 1,
+            "host_name" : hostname,
+            "os_arch" : "x86_64",
+            "os_type" : "centos6",
+            "ph_cpu_count" : 1,
+            "public_host_name" : hostname,
+            "rack_info" : "/default-rack",
+            "total_mem" : 2097152
+          }
+        }
+      )
     self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_heapsize"], "14848")
-    self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "2048")
-    self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "2048")
+    self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_heapsize"], "5000")
+    self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "1250")
+    self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "1250")

   def test_validateHDFSConfigurationsEnv(self):
     configurations = {}