<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

<configuration supports_final="true">

  <property>
    <name>hive.heapsize</name>
    <value>1024</value>
    <description>Hive Java heap size.</description>
  </property>

  <property>
    <name>ambari.hive.db.schema.name</name>
    <value>hive</value>
    <description>Name of the database used as the Hive Metastore.</description>
  </property>

  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true</value>
    <description>JDBC connect string for a JDBC metastore.</description>
  </property>

  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
    <description>Driver class name for a JDBC metastore.</description>
  </property>

  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hive</value>
    <description>Username to use against the metastore database.</description>
  </property>

  <property require-input="true">
    <name>javax.jdo.option.ConnectionPassword</name>
    <value> </value>
    <property-type>PASSWORD</property-type>
    <description>Password to use against the metastore database.</description>
  </property>
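
  <!--
    Illustrative sketch only, not part of the stock defaults: when the
    metastore database lives on a dedicated MySQL host rather than on
    localhost, the connection URL above would typically be overridden along
    these lines. The hostname "db.example.com" is a placeholder.

    <property>
      <name>javax.jdo.option.ConnectionURL</name>
      <value>jdbc:mysql://db.example.com:3306/hive?createDatabaseIfNotExist=true</value>
    </property>
  -->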

  <property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/apps/hive/warehouse</value>
    <description>Location of the default database for the warehouse.</description>
  </property>

  <property>
    <name>hive.metastore.sasl.enabled</name>
    <value>false</value>
    <description>If true, the metastore thrift interface will be secured with SASL.
      Clients must authenticate with Kerberos.</description>
  </property>

  <property>
    <name>hive.metastore.cache.pinobjtypes</name>
    <value>Table,Database,Type,FieldSchema,Order</value>
    <description>Comma-separated list of metastore object types that should be pinned in the cache.</description>
  </property>

  <property>
    <name>hive.metastore.uris</name>
    <value>thrift://localhost:9083</value>
    <description>URI for clients to contact the metastore server.</description>
  </property>
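
  <!--
    Sketch, assuming two metastore hosts with placeholder names:
    hive.metastore.uris accepts a comma-separated list of URIs, and clients
    try them in order, which gives basic failover across metastore instances.

    <property>
      <name>hive.metastore.uris</name>
      <value>thrift://metastore1.example.com:9083,thrift://metastore2.example.com:9083</value>
    </property>
  -->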

  <property>
    <name>hive.metastore.pre.event.listeners</name>
    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
    <description>Pre-event listener classes to be loaded on the metastore side to run code
      whenever databases, tables, and partitions are created, altered, or dropped.
      Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
      if metastore-side authorization is desired.</description>
  </property>

  <property>
    <name>hive.metastore.client.socket.timeout</name>
    <value>60</value>
    <description>MetaStore client socket timeout in seconds.</description>
  </property>

  <property>
    <name>hive.metastore.execute.setugi</name>
    <value>true</value>
    <description>In insecure mode, setting this property to true causes the metastore to execute DFS operations
      using the client's reported user and group permissions. Note that this property must be set on both the
      client and server sides. Further note that it is best-effort: if the client sets it to true and the server
      sets it to false, the client setting is ignored.</description>
  </property>

  <property>
    <name>hive.security.authorization.enabled</name>
    <value>false</value>
    <description>Enable or disable Hive client authorization.</description>
  </property>

  <property>
    <name>hive.security.authorization.manager</name>
    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
    <description>The Hive client authorization manager class name.
      The user-defined authorization class should implement the interface
      org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.</description>
  </property>

  <property>
    <name>hive.security.metastore.authorization.manager</name>
    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
    <description>The authorization manager class name to be used in the metastore for authorization.
      The user-defined authorization class should implement the interface
      org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.</description>
  </property>

  <property>
    <name>hive.security.authenticator.manager</name>
    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
    <description>Hive client authenticator manager class name. The user-defined authenticator class should
      implement the interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.</description>
  </property>

  <property>
    <name>hive.server2.enable.doAs</name>
    <value>true</value>
    <description>Impersonate the connected user. By default HiveServer2 performs query processing as the user
      who submitted the query. If this parameter is set to false, the query runs as the user the HiveServer2
      process runs as.</description>
  </property>
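
  <!--
    Related note, given as a sketch rather than shipped defaults: for doAs
    impersonation to work end to end, Hadoop itself must allow the hive
    service user to proxy other users. That is configured in core-site.xml,
    not in this file; the wildcard values below are illustrative and would
    normally be narrowed to specific hosts and groups.

    <property>
      <name>hadoop.proxyuser.hive.hosts</name>
      <value>*</value>
    </property>
    <property>
      <name>hadoop.proxyuser.hive.groups</name>
      <value>*</value>
    </property>
  -->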

  <property>
    <name>hive.server2.enable.impersonation</name>
    <description>Enable user impersonation for HiveServer2.</description>
    <value>true</value>
  </property>

  <property>
    <name>hive.server2.authentication</name>
    <description>Authentication mode, default NONE. Options are NONE, NOSASL, KERBEROS, LDAP, PAM and CUSTOM.</description>
    <value>NONE</value>
  </property>
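
  <!--
    Sketch of one alternative mode, assuming an LDAP server at a placeholder
    address: switching hive.server2.authentication to LDAP is typically
    paired with the LDAP endpoint and base DN properties.

    <property>
      <name>hive.server2.authentication</name>
      <value>LDAP</value>
    </property>
    <property>
      <name>hive.server2.authentication.ldap.url</name>
      <value>ldap://ldap.example.com:389</value>
    </property>
    <property>
      <name>hive.server2.authentication.ldap.baseDN</name>
      <value>ou=people,dc=example,dc=com</value>
    </property>
  -->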

  <property>
    <name>fs.hdfs.impl.disable.cache</name>
    <value>true</value>
    <description>Disable the HDFS filesystem cache.</description>
  </property>

  <property>
    <name>fs.file.impl.disable.cache</name>
    <value>true</value>
    <description>Disable the local filesystem cache.</description>
  </property>

  <property>
    <name>hive.enforce.bucketing</name>
    <value>true</value>
    <description>Whether bucketing is enforced. If true, bucketing is enforced while inserting into the table.</description>
  </property>

  <property>
    <name>hive.enforce.sorting</name>
    <value>true</value>
    <description>Whether sorting is enforced. If true, sorting is enforced while inserting into the table.</description>
  </property>

  <property>
    <name>hive.map.aggr</name>
    <value>true</value>
    <description>Whether to use map-side aggregation in Hive GROUP BY queries.</description>
  </property>

  <property>
    <name>hive.optimize.bucketmapjoin</name>
    <value>true</value>
    <description>If the tables being joined are bucketized on the join columns, and the number of buckets in one
      table is a multiple of the number of buckets in the other table, the buckets can be joined with each other
      by setting this parameter to true.</description>
  </property>

  <property>
    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
    <value>false</value>
    <description>If the tables being joined are sorted and bucketized on the join columns, and they have the same
      number of buckets, a sort-merge join can be performed by setting this parameter to true.</description>
  </property>

  <property>
    <name>hive.mapred.reduce.tasks.speculative.execution</name>
    <value>false</value>
    <description>Whether speculative execution for reducers should be turned on.</description>
  </property>

  <property>
    <name>hive.auto.convert.join</name>
    <value>true</value>
    <description>Whether Hive enables the optimization that converts a common join into a map join based on
      the input file size.</description>
  </property>

  <property>
    <name>hive.auto.convert.sortmerge.join</name>
    <value>true</value>
    <description>Whether the join is automatically converted to a sort-merge join if the joined tables pass
      the criteria for a sort-merge join.</description>
  </property>

  <property>
    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
    <value>true</value>
    <description>Required to enable the conversion of an SMB (sort-merge-bucket) join to a map-join SMB join.</description>
  </property>

  <property>
    <name>hive.auto.convert.join.noconditionaltask</name>
    <value>true</value>
    <description>Whether Hive enables the optimization that converts a common join into a map join based on the
      input file size. If this parameter is on, and the sum of the sizes of n-1 of the tables/partitions in an
      n-way join is smaller than the specified size, the join is directly converted to a map join (there is no
      conditional task).</description>
  </property>

  <property>
    <name>hive.auto.convert.join.noconditionaltask.size</name>
    <value>1000000000</value>
    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect.
      If it is on, and the sum of the sizes of n-1 of the tables/partitions in an n-way join is smaller than this
      size (in bytes), the join is directly converted to a map join (there is no conditional task). The stock
      Hive default is 10MB.</description>
  </property>
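
  <!--
    Worked example with illustrative numbers only: with the threshold above
    set to 1000000000 bytes, a three-way join whose two smaller inputs total,
    say, 400 MB + 500 MB = 900 MB (the sum of the n-1 smaller sides) stays
    under the threshold, so the join is converted directly to a map join. If
    they totaled 1.2 GB instead, the common join would be kept.
  -->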

  <property>
    <name>hive.optimize.reducededuplication.min.reducer</name>
    <value>1</value>
    <description>Reduce deduplication merges two reduce-sink operators (RSs) by moving the key/parts/reducer-num
      of the child RS to the parent RS. If the reducer-num of the child RS is fixed (order by or forced bucketing)
      and small, this can produce a very slow, single-MR job. The optimization is disabled if the number of
      reducers is less than the specified value.</description>
  </property>

  <property>
    <name>hive.optimize.mapjoin.mapreduce</name>
    <value>true</value>
    <description>If hive.auto.convert.join is off, this parameter does not take effect. If it is on, and there
      are map-join jobs followed by a map-reduce job (e.g., a group by), each map-only job is merged with the
      following map-reduce job.</description>
  </property>

  <property>
    <name>hive.mapjoin.bucket.cache.size</name>
    <value>10000</value>
    <description>How many values for each key in the map-joined table should be cached in memory.</description>
  </property>

  <property>
    <name>hive.vectorized.execution.enabled</name>
    <value>false</value>
    <description>This flag controls the vectorized mode of query execution as documented in HIVE-4160
      (as of Hive 0.13.0).</description>
  </property>

  <property>
    <name>hive.optimize.reducededuplication</name>
    <value>true</value>
    <description>Remove extra map-reduce jobs if the data is already clustered by the same key that needs to be
      used again.</description>
  </property>

  <property>
    <name>hive.optimize.index.filter</name>
    <value>true</value>
    <description>Whether to enable automatic use of indexes.</description>
  </property>

  <property>
    <name>hive.server2.thrift.port</name>
    <value>10000</value>
    <description>TCP port number to listen on; default 10000.</description>
  </property>
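
  <!--
    Sketch: with the default port above, a JDBC client connects directly to a
    single HiveServer2 instance with a URL of the following form (the
    hostname is a placeholder):

    jdbc:hive2://hs2.example.com:10000/default
  -->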

  <property>
    <name>hive.server2.support.dynamic.service.discovery</name>
    <value>false</value>
    <description>Whether HiveServer2 supports dynamic service discovery for its clients. To support this, each
      instance of HiveServer2 currently uses ZooKeeper to register itself when it is brought up. JDBC/ODBC
      clients should use the ZooKeeper ensemble (hive.zookeeper.quorum) in their connection string.</description>
  </property>

  <property>
    <name>hive.server2.zookeeper.namespace</name>
    <value>hiveserver2</value>
    <description>The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service
      discovery.</description>
  </property>
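
  <!--
    Sketch: when dynamic service discovery is enabled, JDBC clients connect
    through the ZooKeeper ensemble instead of a fixed HiveServer2 host, using
    the namespace configured above. The ZooKeeper hostnames here are
    placeholders:

    jdbc:hive2://zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2
  -->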

</configuration>