@@ -0,0 +1,396 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>Whether to enable HDFS append support.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <description>Whether to enable WebHDFS.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>The number of failed disks a DataNode will tolerate.</description>
+    <final>true</final>
+  </property>
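+  <!-- With dfs.datanode.failed.volumes.tolerated=0 (above), the DataNode shuts itself down as soon as a single data volume fails. -->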
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value>hbase</value>
+    <description>The user who is allowed to perform short-circuit
+    reads.
+    </description>
+    <final>true</final>
+  </property>
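+  <!-- Assumption: "hbase" is listed here so that HBase RegionServers can read HDFS block files directly from local disk (short-circuit reads) instead of going through the DataNode. -->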
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>/mnt/hmc/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem a DFS data node
+       should store its blocks.  If this is a comma-delimited
+       list of directories, then data will be stored in all named
+       directories, typically on different devices.
+       Directories that do not exist are ignored.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value>/etc/hadoop/conf/dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
+  </property>
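+  <!-- Both dfs.include and dfs.exclude are re-read when "hadoop dfsadmin -refreshNodes" is run; a live DataNode listed in the exclude file is decommissioned rather than dropped immediately. -->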
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
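+  <!-- A threshold of 1.0f keeps the NameNode in safe mode until 100% of blocks satisfy dfs.replication.min; the stock default is 0.999f. -->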
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for the balancing purpose in terms of
+        the number of bytes per second.
+  </description>
+  </property>
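+  <!-- 6250000 bytes/sec is 6.25 MB/s, i.e. 50 Mbit/s of balancer traffic per DataNode. -->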
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
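+  <!-- 134217728 bytes = 128 MB per block. -->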
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50070</value>
+<description>The address and port on which the NameNode web UI
+listens (host:port).</description>
+<final>true</final>
+</property>
+
+<property>
+<name>dfs.datanode.du.reserved</name>
+<!-- cluster variant -->
+<value>1073741824</value>
+<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+</description>
+</property>
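+<!-- 1073741824 bytes = 1 GiB kept free on each data volume for non-HDFS use. -->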
+
+<property>
+<name>dfs.datanode.ipc.address</name>
+<value>0.0.0.0:8010</value>
+<description>
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+</description>
+</property>
+
+<property>
+<name>dfs.blockreport.initialDelay</name>
+<value>120</value>
+<description>Delay for first block report in seconds.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>40</value>
+<description>The number of server threads for the namenode.</description>
+</property>
+
+<property>
+<name>dfs.datanode.max.xcievers</name>
+<value>1024</value>
+<description>PRIVATE CONFIG VARIABLE</description>
+</property>
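+<!-- "xcievers" is the historical (misspelled) property name; it caps the number of concurrent block-transfer threads per DataNode. HBase deployments typically raise it to 1024 or higher. -->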
+
+<!-- Permissions configuration -->
+
+<property>
+<name>dfs.umaskmode</name>
+<value>077</value>
+<description>
+The octal umask used when creating files and directories.
+</description>
+</property>
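+<!-- A umask of 077 strips all group and other bits, so new directories come out as 700 and new files as 600 unless permissions are set explicitly. -->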
+
+<property>
+<name>dfs.web.ugi</name>
+<!-- cluster variant -->
+<value>gopher,gopher</value>
+<description>The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+</description>
+</property>
+
+<property>
+<name>dfs.permissions</name>
+<value>true</value>
+<description>
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+</description>
+</property>
+
+<property>
+<name>dfs.permissions.supergroup</name>
+<value>hdfs</value>
+<description>The name of the group of super-users.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>100</value>
+<description>Added to grow the queue size so that more client connections are allowed.</description>
+</property>
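+<!-- Note: dfs.namenode.handler.count is also declared above with a value of 40; since the file is read top to bottom, this later value of 100 is the one that takes effect. -->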
+
+<property>
+<name>ipc.server.max.response.size</name>
+<value>5242880</value>
+</property>
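+<!-- 5242880 bytes = 5 MB. -->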
+<property>
+<name>dfs.block.access.token.enable</name>
+<value>true</value>
+<description>
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+<description>
+Kerberos principal name for the NameNode.
+</description>
+</property>
+
+<property>
+<name>dfs.secondary.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+    <description>
+        Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+<!--
+  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value>hdp2.cybervisiontech.com.ua:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per the Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@</value>
+    <description>
+      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      Combined keytab file containing the secondary namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/dn.service.keytab</value>
+    <description>
+      The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+    <description>The https port where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50470</value>
+    <description>The https address where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+<description>The permissions that should be set on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.</description>
+  </property>
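+  <!-- 750 corresponds to rwxr-x---: full access for the owner, read/execute for the group, and no access for other users. -->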
+
+  <property>
+  <name>dfs.access.time.precision</name>
+  <value>0</value>
+  <description>The access time for HDFS file is precise up to this value.
+               The default value is 1 hour. Setting a value of 0 disables
+               access times for HDFS.
+  </description>
+</property>
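+<!-- With access-time precision set to 0, reads no longer trigger access-time updates on the NameNode, which removes that bookkeeping write from the read path. -->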
+
+<property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL of users and groups that may view the default servlets in HDFS.</description>
+</property>
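+<!-- The ACL value uses the "users groups" form: text before the first space names users, text after it names groups. The leading space in " hdfs" therefore means no individual users, only the hdfs group (an assumption worth checking against the Hadoop AccessControlList docs). -->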
+
+<property>
+  <name>ipc.server.read.threadpool.size</name>
+  <value>5</value>
+  <description>The number of reader threads used by the IPC server to accept and read incoming requests.</description>
+</property>
+
+</configuration>