hadoop-env.sh
  1. #
  2. # Licensed to the Apache Software Foundation (ASF) under one
  3. # or more contributor license agreements. See the NOTICE file
  4. # distributed with this work for additional information
  5. # regarding copyright ownership. The ASF licenses this file
  6. # to you under the Apache License, Version 2.0 (the
  7. # "License"); you may not use this file except in compliance
  8. # with the License. You may obtain a copy of the License at
  9. #
  10. # http://www.apache.org/licenses/LICENSE-2.0
  11. #
  12. # Unless required by applicable law or agreed to in writing, software
  13. # distributed under the License is distributed on an "AS IS" BASIS,
  14. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. # See the License for the specific language governing permissions and
  16. # limitations under the License.
  17. # Set Hadoop-specific environment variables here.
  18. ##
  19. ## THIS FILE ACTS AS THE MASTER FILE FOR ALL HADOOP PROJECTS.
  20. ## SETTINGS HERE WILL BE READ BY ALL HADOOP COMMANDS. THEREFORE,
  21. ## ONE CAN USE THIS FILE TO SET YARN, HDFS, AND MAPREDUCE
  22. ## CONFIGURATION OPTIONS INSTEAD OF xxx-env.sh.
  23. ##
  24. ## Precedence rules:
  25. ##
  26. ## {yarn-env.sh|hdfs-env.sh} > hadoop-env.sh > hard-coded defaults
  27. ##
  28. ## {YARN_xyz|HDFS_xyz} > HADOOP_xyz > hard-coded defaults
  29. ##
  30. # Many of the options here are built from the perspective that users
  31. # may want to provide OVERWRITING values on the command line.
  32. # For example:
  33. #
  34. # JAVA_HOME=/usr/java/testing hdfs dfs -ls
  35. #
  36. # Therefore, the vast majority (BUT NOT ALL!) of these defaults
  37. # are configured for substitution and not append. If append
  38. # is preferable, modify this file accordingly.
  39. ###
  40. # Generic settings for HADOOP
  41. ###
  42. # Technically, the only required environment variable is JAVA_HOME.
  43. # All others are optional. However, the defaults are probably not
  44. # preferred. Many sites configure these options outside of Hadoop,
  45. # such as in /etc/profile.d
  46. # The java implementation to use. By default, this environment
  47. # variable is REQUIRED on ALL platforms except OS X!
  48. # export JAVA_HOME=
  49. # Location of Hadoop. By default, Hadoop will attempt to determine
  50. # this location based upon its execution path.
  51. # export HADOOP_PREFIX=
  52. # Location of Hadoop's configuration information. i.e., where this
  53. # file is probably living. Many sites will also set this in the
  54. # same location where JAVA_HOME is defined. If this is not defined
  55. # Hadoop will attempt to locate it based upon its execution
  56. # path.
  57. # export HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop
  58. # The maximum amount of heap to use (Java -Xmx). If no unit
  59. # is provided, it will be converted to MB. Daemons will
  60. # prefer any Xmx setting in their respective _OPT variable.
  61. # There is no default; the JVM will autoscale based upon machine
  62. # memory size.
  63. # export HADOOP_HEAPSIZE_MAX=
  64. # The minimum amount of heap to use (Java -Xms). If no unit
  65. # is provided, it will be converted to MB. Daemons will
  66. # prefer any Xms setting in their respective _OPT variable.
  67. # There is no default; the JVM will autoscale based upon machine
  68. # memory size.
  69. # export HADOOP_HEAPSIZE_MIN=
  70. # Enable extra debugging of Hadoop's JAAS binding, used to set up
  71. # Kerberos security.
  72. # export HADOOP_JAAS_DEBUG=true
  73. # Extra Java runtime options for all Hadoop commands. We don't support
  74. # IPv6 yet/still, so by default the preference is set to IPv4.
  75. # export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
  76. # For Kerberos debugging, an extended option set logs more information
  77. # export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true -Dsun.security.krb5.debug=true -Dsun.security.spnego.debug"
  78. # Some parts of the shell code may do special things dependent upon
  79. # the operating system. We have to set this here. See the next
  80. # section as to why....
  81. export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
  82. # Under certain conditions, Java on OS X will throw SCDynamicStore errors
  83. # in the system logs.
  84. # See HADOOP-8719 for more information. If one needs Kerberos
  85. # support on OS X, one will want to change/remove this extra bit.
  86. case ${HADOOP_OS_TYPE} in
  87. Darwin*)
  88. export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.realm= "
  89. export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.kdc= "
  90. export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.conf= "
  91. ;;
  92. esac
  93. # Extra Java runtime options for some Hadoop commands
  94. # and clients (i.e., hdfs dfs -blah). These get appended to HADOOP_OPTS for
  95. # such commands. In most cases, this should be left empty and
  96. # let users supply it on the command line.
  97. # export HADOOP_CLIENT_OPTS=""
  98. #
  99. # A note about classpaths.
  100. #
  101. # The classpath is configured such that entries are stripped prior
  102. # to handing to Java based either upon duplication or non-existence.
  103. # Wildcards and/or directories are *NOT* expanded as the
  104. # de-duplication is fairly simple. So if two directories are in
  105. # the classpath that both contain awesome-methods-1.0.jar,
  106. # awesome-methods-1.0.jar will still be seen by java. But if
  107. # the classpath specifically has awesome-methods-1.0.jar from the
  108. # same directory listed twice, the last one will be removed.
  109. #
  110. # An additional, custom CLASSPATH. This is really meant for
  111. # end users, but as an administrator, one might want to push
  112. # something extra in here too, such as the jar to the topology
  113. # method. Just be sure to append to the existing HADOOP_USER_CLASSPATH
  114. # so end users have a way to add stuff.
  115. # export HADOOP_USER_CLASSPATH="/some/cool/path/on/your/machine"
  116. # Should HADOOP_USER_CLASSPATH be first in the official CLASSPATH?
  117. # export HADOOP_USER_CLASSPATH_FIRST="yes"
  118. # If HADOOP_USE_CLIENT_CLASSLOADER is set, HADOOP_CLASSPATH along with the main
  119. # jar are handled by a separate isolated client classloader. If it is set,
  120. # HADOOP_USER_CLASSPATH_FIRST is ignored. Can be defined by doing
  121. # export HADOOP_USE_CLIENT_CLASSLOADER=true
  122. # HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES overrides the default definition of
  123. # system classes for the client classloader when HADOOP_USE_CLIENT_CLASSLOADER
  124. # is enabled. Names ending in '.' (period) are treated as package names, and
  125. # names starting with a '-' are treated as negative matches. For example,
  126. # export HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES="-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop."
  127. # Enable optional, bundled Hadoop features
  128. # This is a comma delimited list. It may NOT be overridden via .hadooprc
  129. # Entries may be added/removed as needed.
  130. # export HADOOP_OPTIONAL_TOOLS="@@@HADOOP_OPTIONAL_TOOLS@@@"
  131. ###
  132. # Options for remote shell connectivity
  133. ###
  134. # There are some optional components of hadoop that allow for
  135. # command and control of remote hosts. For example,
  136. # start-dfs.sh will attempt to bring up all NNs, DNs, etc.
  137. # Options to pass to SSH when one of the "log into a host and
  138. # start/stop daemons" scripts is executed
  139. # export HADOOP_SSH_OPTS="-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"
  140. # The built-in ssh handler will limit itself to 10 simultaneous connections.
  141. # For pdsh users, this sets the fanout size ( -f )
  142. # Change this to increase/decrease as necessary.
  143. # export HADOOP_SSH_PARALLEL=10
  144. # Filename which contains all of the hosts for any remote execution
  145. # helper scripts such as slaves.sh, start-dfs.sh, etc.
  146. # export HADOOP_SLAVES="${HADOOP_CONF_DIR}/slaves"
  147. ###
  148. # Options for all daemons
  149. ###
  150. #
  151. #
  152. # Many options may also be specified as Java properties. It is
  153. # very common, and in many cases, desirable, to hard-set these
  154. # in daemon _OPTS variables. Where applicable, the appropriate
  155. # Java property is also identified. Note that many are re-used
  156. # or set differently in certain contexts (e.g., secure vs
  157. # non-secure)
  158. #
  159. # Where (primarily) daemon log files are stored. $HADOOP_PREFIX/logs
  160. # by default.
  161. # Java property: hadoop.log.dir
  162. # export HADOOP_LOG_DIR=${HADOOP_PREFIX}/logs
  163. # A string representing this instance of hadoop. $USER by default.
  164. # This is used in writing log and pid files, so keep that in mind!
  165. # Java property: hadoop.id.str
  166. # export HADOOP_IDENT_STRING=$USER
  167. # How many seconds to pause after stopping a daemon
  168. # export HADOOP_STOP_TIMEOUT=5
  169. # Where pid files are stored. /tmp by default.
  170. # export HADOOP_PID_DIR=/tmp
  171. # Default log4j setting for interactive commands
  172. # Java property: hadoop.root.logger
  173. # export HADOOP_ROOT_LOGGER=INFO,console
  174. # Default log4j setting for daemons spawned explicitly by
  175. # --daemon option of hadoop, hdfs, mapred and yarn command.
  176. # Java property: hadoop.root.logger
  177. # export HADOOP_DAEMON_ROOT_LOGGER=INFO,RFA
  178. # Default log level and output location for security-related messages.
  179. # You will almost certainly want to change this on a per-daemon basis via
  180. # the Java property (i.e., -Dhadoop.security.logger=foo). (Note that the
  181. # defaults for the NN and 2NN override this by default.)
  182. # Java property: hadoop.security.logger
  183. # export HADOOP_SECURITY_LOGGER=INFO,NullAppender
  184. # Default log level for file system audit messages.
  185. # Generally, this is specifically set in the namenode-specific
  186. # options line.
  187. # Java property: hdfs.audit.logger
  188. # export HADOOP_AUDIT_LOGGER=INFO,NullAppender
  189. # Default process priority level
  190. # Note that sub-processes will also run at this level!
  191. # export HADOOP_NICENESS=0
  192. # Default name for the service level authorization file
  193. # Java property: hadoop.policy.file
  194. # export HADOOP_POLICYFILE="hadoop-policy.xml"
  195. #
  196. # NOTE: this is not used by default! <-----
  197. # You can define variables right here and then re-use them later on.
  198. # For example, it is common to use the same garbage collection settings
  199. # for all the daemons. So one could define:
  200. #
  201. # export HADOOP_GC_SETTINGS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps"
  202. #
  203. # .. and then use it as per the b option under the namenode.
  204. ###
  205. # Secure/privileged execution
  206. ###
  207. #
  208. # Out of the box, Hadoop uses jsvc from Apache Commons to launch daemons
  209. # on privileged ports. This functionality can be replaced by providing
  210. # custom functions. See hadoop-functions.sh for more information.
  211. #
  212. # The jsvc implementation to use. Jsvc is required to run secure datanodes
  213. # that bind to privileged ports to provide authentication of data transfer
  214. # protocol. Jsvc is not required if SASL is configured for authentication of
  215. # data transfer protocol using non-privileged ports.
  216. # export JSVC_HOME=/usr/bin
  217. #
  218. # This directory contains pids for secure and privileged processes.
  219. #export HADOOP_SECURE_PID_DIR=${HADOOP_PID_DIR}
  220. #
  221. # This directory contains the logs for secure and privileged processes.
  222. # Java property: hadoop.log.dir
  223. # export HADOOP_SECURE_LOG=${HADOOP_LOG_DIR}
  224. #
  225. # When running a secure daemon, the default value of HADOOP_IDENT_STRING
  226. # ends up being a bit bogus. Therefore, by default, the code will
  227. # replace HADOOP_IDENT_STRING with HADOOP_SECURE_xx_USER. If one wants
  228. # to keep HADOOP_IDENT_STRING untouched, then uncomment this line.
  229. # export HADOOP_SECURE_IDENT_PRESERVE="true"
  230. ###
  231. # NameNode specific parameters
  232. ###
  233. # Default log level and output location for file system related change
  234. # messages. For non-namenode daemons, the Java property must be set in
  235. # the appropriate _OPTS if one wants something other than INFO,NullAppender
  236. # Java property: hdfs.audit.logger
  237. # export HDFS_AUDIT_LOGGER=INFO,NullAppender
  238. # Specify the JVM options to be used when starting the NameNode.
  239. # These options will be appended to the options specified as HADOOP_OPTS
  240. # and therefore may override any similar flags set in HADOOP_OPTS
  241. #
  242. # a) Set JMX options
  243. # export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=1026"
  244. #
  245. # b) Set garbage collection logs
  246. # export HADOOP_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
  247. #
  248. # c) ... or set them directly
  249. # export HADOOP_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
  250. # this is the default:
  251. # export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
  252. ###
  253. # SecondaryNameNode specific parameters
  254. ###
  255. # Specify the JVM options to be used when starting the SecondaryNameNode.
  256. # These options will be appended to the options specified as HADOOP_OPTS
  257. # and therefore may override any similar flags set in HADOOP_OPTS
  258. #
  259. # This is the default:
  260. # export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
  261. ###
  262. # DataNode specific parameters
  263. ###
  264. # Specify the JVM options to be used when starting the DataNode.
  265. # These options will be appended to the options specified as HADOOP_OPTS
  266. # and therefore may override any similar flags set in HADOOP_OPTS
  267. #
  268. # This is the default:
  269. # export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"
  270. # On secure datanodes, user to run the datanode as after dropping privileges.
  271. # This **MUST** be uncommented to enable secure HDFS if using privileged ports
  272. # to provide authentication of data transfer protocol. This **MUST NOT** be
  273. # defined if SASL is configured for authentication of data transfer protocol
  274. # using non-privileged ports.
  275. # This will replace the hadoop.id.str Java property in secure mode.
  276. # export HADOOP_SECURE_DN_USER=hdfs
  277. # Supplemental options for secure datanodes
  278. # By default, Hadoop uses jsvc which needs to know to launch a
  279. # server jvm.
  280. # export HADOOP_DN_SECURE_EXTRA_OPTS="-jvm server"
  281. # Where datanode log files are stored in the secure data environment.
  282. # This will replace the hadoop.log.dir Java property in secure mode.
  283. # export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_SECURE_LOG_DIR}
  284. # Where datanode pid files are stored in the secure data environment.
  285. # export HADOOP_SECURE_DN_PID_DIR=${HADOOP_SECURE_PID_DIR}
  286. ###
  287. # NFS3 Gateway specific parameters
  288. ###
  289. # Specify the JVM options to be used when starting the NFS3 Gateway.
  290. # These options will be appended to the options specified as HADOOP_OPTS
  291. # and therefore may override any similar flags set in HADOOP_OPTS
  292. #
  293. # export HADOOP_NFS3_OPTS=""
  294. # Specify the JVM options to be used when starting the Hadoop portmapper.
  295. # These options will be appended to the options specified as HADOOP_OPTS
  296. # and therefore may override any similar flags set in HADOOP_OPTS
  297. #
  298. # export HADOOP_PORTMAP_OPTS="-Xmx512m"
  299. # Supplemental options for privileged gateways
  300. # By default, Hadoop uses jsvc which needs to know to launch a
  301. # server jvm.
  302. # export HADOOP_NFS3_SECURE_EXTRA_OPTS="-jvm server"
  303. # On privileged gateways, user to run the gateway as after dropping privileges
  304. # This will replace the hadoop.id.str Java property in secure mode.
  305. # export HADOOP_PRIVILEGED_NFS_USER=nfsserver
  306. ###
  307. # ZKFailoverController specific parameters
  308. ###
  309. # Specify the JVM options to be used when starting the ZKFailoverController.
  310. # These options will be appended to the options specified as HADOOP_OPTS
  311. # and therefore may override any similar flags set in HADOOP_OPTS
  312. #
  313. # export HADOOP_ZKFC_OPTS=""
  314. ###
  315. # QuorumJournalNode specific parameters
  316. ###
  317. # Specify the JVM options to be used when starting the QuorumJournalNode.
  318. # These options will be appended to the options specified as HADOOP_OPTS
  319. # and therefore may override any similar flags set in HADOOP_OPTS
  320. #
  321. # export HADOOP_JOURNALNODE_OPTS=""
  322. ###
  323. # HDFS Balancer specific parameters
  324. ###
  325. # Specify the JVM options to be used when starting the HDFS Balancer.
  326. # These options will be appended to the options specified as HADOOP_OPTS
  327. # and therefore may override any similar flags set in HADOOP_OPTS
  328. #
  329. # export HADOOP_BALANCER_OPTS=""
  330. ###
  331. # HDFS Mover specific parameters
  332. ###
  333. # Specify the JVM options to be used when starting the HDFS Mover.
  334. # These options will be appended to the options specified as HADOOP_OPTS
  335. # and therefore may override any similar flags set in HADOOP_OPTS
  336. #
  337. # export HADOOP_MOVER_OPTS=""
  338. ###
  339. # Advanced Users Only!
  340. ###
  341. #
  342. # When building Hadoop, one can add the class paths to the commands
  343. # via this special env var:
  344. # export HADOOP_ENABLE_BUILD_PATHS="true"
  345. #
  346. # To prevent accidents, shell commands can be (superficially) locked
  347. # to only allow certain users to execute certain subcommands.
  348. #
  349. # For example, to limit who can execute the namenode command,
  350. # export HADOOP_namenode_USER=hdfs