<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements. See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License. You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->

<!-- Do not modify this file directly. Instead, copy entries that you -->
<!-- wish to modify from this file into yarn-site.xml and change them -->
<!-- there. If yarn-site.xml does not already exist, create it. -->
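<!--
  Illustration of the instruction above (the value shown is an example, not a
  default from this file): to change a setting, copy the property into
  yarn-site.xml and edit it there, e.g.

  <configuration>
    <property>
      <name>yarn.nodemanager.resource.memory-mb</name>
      <value>4096</value>
    </property>
  </configuration>
-->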
<configuration>

  <!-- IPC Configs -->
  <property>
    <description>Factory to create client IPC classes.</description>
    <name>yarn.ipc.client.factory.class</name>
  </property>
  <property>
    <description>Type of serialization to use.</description>
    <name>yarn.ipc.serializer.type</name>
    <value>protocolbuffers</value>
  </property>
  <property>
    <description>Factory to create server IPC classes.</description>
    <name>yarn.ipc.server.factory.class</name>
  </property>
  <property>
    <description>Factory to create IPC exceptions.</description>
    <name>yarn.ipc.exception.factory.class</name>
  </property>
  <property>
    <description>Factory to create serializable records.</description>
    <name>yarn.ipc.record.factory.class</name>
  </property>
  <property>
    <description>RPC class implementation</description>
    <name>yarn.ipc.rpc.class</name>
    <value>org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC</value>
  </property>

  <!-- Resource Manager Configs -->
  <property>
    <description>The address of the applications manager interface in the RM.</description>
    <name>yarn.resourcemanager.address</name>
    <value>0.0.0.0:8032</value>
  </property>
  <property>
    <description>The number of threads used to handle applications manager requests.</description>
    <name>yarn.resourcemanager.client.thread-count</name>
    <value>50</value>
  </property>
  <property>
    <description>The expiry interval for application master reporting.</description>
    <name>yarn.am.liveness-monitor.expiry-interval-ms</name>
    <value>600000</value>
  </property>
  <property>
    <description>The Kerberos principal for the resource manager.</description>
    <name>yarn.resourcemanager.principal</name>
  </property>
  <property>
    <description>The address of the scheduler interface.</description>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>0.0.0.0:8030</value>
  </property>
  <property>
    <description>Number of threads to handle scheduler interface.</description>
    <name>yarn.resourcemanager.scheduler.client.thread-count</name>
    <value>50</value>
  </property>
  <property>
    <description>The address of the RM web application.</description>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>0.0.0.0:8088</value>
  </property>
  <property>
    <description>The address of the resource tracker interface in the RM.</description>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>0.0.0.0:8031</value>
  </property>
  <property>
    <description>Are ACLs enabled.</description>
    <name>yarn.acl.enable</name>
    <value>true</value>
  </property>
  <property>
    <description>ACL of who can be admin of the YARN cluster.</description>
    <name>yarn.admin.acl</name>
    <value>*</value>
  </property>
  <property>
    <description>The address of the RM admin interface.</description>
    <name>yarn.resourcemanager.admin.address</name>
    <value>0.0.0.0:8033</value>
  </property>
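<!--
  Illustration (not a default): on a real cluster the RM endpoints above are
  usually bound to the ResourceManager host rather than 0.0.0.0, e.g. in
  yarn-site.xml (rmhost.example.com is a placeholder hostname):

  <property>
    <name>yarn.resourcemanager.address</name>
    <value>rmhost.example.com:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>rmhost.example.com:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>rmhost.example.com:8031</value>
  </property>
-->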
  <property>
    <description>Number of threads used to handle RM admin interface.</description>
    <name>yarn.resourcemanager.admin.client.thread-count</name>
    <value>1</value>
  </property>
  <property>
    <description>How often should the RM check that the AM is still alive.</description>
    <name>yarn.resourcemanager.amliveliness-monitor.interval-ms</name>
    <value>1000</value>
  </property>
  <property>
    <description>The maximum number of application attempts. It's a global
    setting for all application masters. Each application master can specify
    its individual maximum number of application attempts via the API, but the
    individual number cannot be more than the global upper bound. If it is,
    the resourcemanager will override it.</description>
    <name>yarn.resourcemanager.am.max-attempts</name>
    <value>1</value>
  </property>
  <property>
    <description>How often to check that containers are still alive.</description>
    <name>yarn.resourcemanager.container.liveness-monitor.interval-ms</name>
    <value>600000</value>
  </property>
  <property>
    <description>The keytab for the resource manager.</description>
    <name>yarn.resourcemanager.keytab</name>
    <value>/etc/krb5.keytab</value>
  </property>
  <property>
    <description>How long to wait until a node manager is considered dead.</description>
    <name>yarn.nm.liveness-monitor.expiry-interval-ms</name>
    <value>600000</value>
  </property>
  <property>
    <description>How often to check that node managers are still alive.</description>
    <name>yarn.resourcemanager.nm.liveness-monitor.interval-ms</name>
    <value>1000</value>
  </property>
  <property>
    <description>Path to file with nodes to include.</description>
    <name>yarn.resourcemanager.nodes.include-path</name>
    <value></value>
  </property>
  <property>
    <description>Path to file with nodes to exclude.</description>
    <name>yarn.resourcemanager.nodes.exclude-path</name>
    <value></value>
  </property>
  <property>
    <description>Number of threads to handle resource tracker calls.</description>
    <name>yarn.resourcemanager.resource-tracker.client.thread-count</name>
    <value>50</value>
  </property>
  <property>
    <description>The class to use as the resource scheduler.</description>
    <name>yarn.resourcemanager.scheduler.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
  </property>
  <property>
    <description>The minimum allocation for every container request at the RM,
    in MBs. Memory requests lower than this won't take effect,
    and the specified value will get allocated at minimum.</description>
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>1024</value>
  </property>
  <property>
    <description>The maximum allocation for every container request at the RM,
    in MBs. Memory requests higher than this won't take effect,
    and will get capped to this value.</description>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>8192</value>
  </property>
  <property>
    <description>The minimum allocation for every container request at the RM,
    in terms of virtual CPU cores. Requests lower than this won't take effect,
    and the specified value will get allocated at minimum.</description>
    <name>yarn.scheduler.minimum-allocation-vcores</name>
    <value>1</value>
  </property>
  <property>
    <description>The maximum allocation for every container request at the RM,
    in terms of virtual CPU cores. Requests higher than this won't take effect,
    and will get capped to this value.</description>
    <name>yarn.scheduler.maximum-allocation-vcores</name>
    <value>32</value>
  </property>
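<!--
  Worked example (a sketch, assuming the default CapacityScheduler normalizes
  requests to multiples of the minimum allocation): with
  yarn.scheduler.minimum-allocation-mb=1024 and maximum-allocation-mb=8192,
  a container request for 1500 MB would typically be rounded up to 2048 MB,
  a request for 500 MB raised to 1024 MB, and a request for 10240 MB capped
  at 8192 MB.
-->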
  <property>
    <description>Enable RM to recover state after starting. If true, then
    yarn.resourcemanager.store.class must be specified</description>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>false</value>
  </property>
  <property>
    <description>The class to use as the persistent store.</description>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore</value>
  </property>
  <property>
    <description>URI pointing to the location of the FileSystem path where
    RM state will be stored. This must be supplied when using
    org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
    as the value for yarn.resourcemanager.store.class</description>
    <name>yarn.resourcemanager.fs.rm-state-store.uri</name>
    <value>${hadoop.tmp.dir}/yarn/system/rmstore</value>
    <!--value>hdfs://localhost:9000/rmstore</value-->
  </property>
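<!--
  Illustration (not a default): to enable RM recovery with the FileSystem state
  store backed by HDFS, a yarn-site.xml might contain something like the sketch
  below; the HDFS URI is a placeholder for a real NameNode address.

  <property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.fs.rm-state-store.uri</name>
    <value>hdfs://namenode.example.com:9000/rmstore</value>
  </property>
-->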
  <property>
    <description>The maximum number of completed applications RM keeps.</description>
    <name>yarn.resourcemanager.max-completed-applications</name>
    <value>10000</value>
  </property>
  <property>
    <description>Interval at which the delayed token removal thread runs</description>
    <name>yarn.resourcemanager.delayed.delegation-token.removal-interval-ms</name>
    <value>30000</value>
  </property>
  <property>
    <description>Interval for the roll over for the master key used to generate
    application tokens
    </description>
    <name>yarn.resourcemanager.application-tokens.master-key-rolling-interval-secs</name>
    <value>86400</value>
  </property>
  <property>
    <description>Interval for the roll over for the master key used to generate
    container tokens. It is expected to be much greater than
    yarn.nm.liveness-monitor.expiry-interval-ms and
    yarn.rm.container-allocation.expiry-interval-ms. Otherwise the
    behavior is undefined.
    </description>
    <name>yarn.resourcemanager.container-tokens.master-key-rolling-interval-secs</name>
    <value>86400</value>
  </property>
  <property>
    <description>The heart-beat interval in milliseconds for every NodeManager in the cluster.</description>
    <name>yarn.resourcemanager.nodemanagers.heartbeat-interval-ms</name>
    <value>1000</value>
  </property>

  <!-- Node Manager Configs -->
  <property>
    <description>The address of the container manager in the NM.</description>
    <name>yarn.nodemanager.address</name>
    <value>0.0.0.0:0</value>
  </property>
  <property>
    <description>Environment variables that should be forwarded from the NodeManager's environment to the container's.</description>
    <name>yarn.nodemanager.admin-env</name>
    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
  </property>
  <property>
    <description>Environment variables that containers may override rather than use NodeManager's default.</description>
    <name>yarn.nodemanager.env-whitelist</name>
    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,HADOOP_YARN_HOME</value>
  </property>
  <property>
    <description>Who will execute (launch) the containers.</description>
    <name>yarn.nodemanager.container-executor.class</name>
    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
    <!--<value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>-->
  </property>
  <property>
    <description>Number of threads container manager uses.</description>
    <name>yarn.nodemanager.container-manager.thread-count</name>
    <value>20</value>
  </property>
  <property>
    <description>Number of threads used in cleanup.</description>
    <name>yarn.nodemanager.delete.thread-count</name>
    <value>4</value>
  </property>
  <property>
    <description>
    Number of seconds after an application finishes before the nodemanager's
    DeletionService will delete the application's localized file directory
    and log directory.

    To diagnose Yarn application problems, set this property's value large
    enough (for example, to 600 = 10 minutes) to permit examination of these
    directories. After changing the property's value, you must restart the
    nodemanager in order for it to have an effect.

    The roots of Yarn applications' work directories are configurable with
    the yarn.nodemanager.local-dirs property (see below), and the roots
    of the Yarn applications' log directories are configurable with the
    yarn.nodemanager.log-dirs property (see also below).
    </description>
    <name>yarn.nodemanager.delete.debug-delay-sec</name>
    <value>0</value>
  </property>
  <property>
    <description>Keytab for NM.</description>
    <name>yarn.nodemanager.keytab</name>
    <value>/etc/krb5.keytab</value>
  </property>
  <property>
    <description>List of directories to store localized files in. An
    application's localized file directory will be found in:
    ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
    Individual containers' work directories, called container_${contid}, will
    be subdirectories of this.
    </description>
    <name>yarn.nodemanager.local-dirs</name>
    <value>${hadoop.tmp.dir}/nm-local-dir</value>
  </property>
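<!--
  Illustration of the layout described above (paths are examples, not defaults):
  with yarn.nodemanager.local-dirs set to /data/nm-local-dir, the localized files
  for an application run by user "alice" would land under
  /data/nm-local-dir/usercache/alice/appcache/application_<appid>/
  and each container would work in a container_<contid> subdirectory of that.
-->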
  <property>
    <description>Address where the localizer IPC is.</description>
    <name>yarn.nodemanager.localizer.address</name>
    <value>0.0.0.0:8040</value>
  </property>
  <property>
    <description>Interval in between cache cleanups.</description>
    <name>yarn.nodemanager.localizer.cache.cleanup.interval-ms</name>
    <value>600000</value>
  </property>
  <property>
    <description>Target size of localizer cache in MB, per local directory.</description>
    <name>yarn.nodemanager.localizer.cache.target-size-mb</name>
    <value>10240</value>
  </property>
  <property>
    <description>Number of threads to handle localization requests.</description>
    <name>yarn.nodemanager.localizer.client.thread-count</name>
    <value>5</value>
  </property>
  <property>
    <description>Number of threads to use for localization fetching.</description>
    <name>yarn.nodemanager.localizer.fetch.thread-count</name>
    <value>4</value>
  </property>
  <property>
    <description>
    Where to store container logs. An application's localized log directory
    will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
    Individual containers' log directories will be below this, in directories
    named container_${contid}. Each container directory will contain the files
    stderr, stdout, and syslog generated by that container.
    </description>
    <name>yarn.nodemanager.log-dirs</name>
    <value>${yarn.log.dir}/userlogs</value>
  </property>
  <property>
    <description>Whether to enable log aggregation</description>
    <name>yarn.log-aggregation-enable</name>
    <value>false</value>
  </property>
  <property>
    <description>How long to keep aggregation logs before deleting them. -1 disables.
    Be careful: setting this too small will spam the name node.</description>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>-1</value>
  </property>
  <property>
    <description>How long to wait between aggregated log retention checks.
    If set to 0 or a negative value then the value is computed as one-tenth
    of the aggregated log retention time. Be careful: setting this too small
    will spam the name node.</description>
    <name>yarn.log-aggregation.retain-check-interval-seconds</name>
    <value>-1</value>
  </property>
  <property>
    <description>Time in seconds to retain user logs. Only applicable if
    log aggregation is disabled
    </description>
    <name>yarn.nodemanager.log.retain-seconds</name>
    <value>10800</value>
  </property>
  <property>
    <description>Where to aggregate logs to.</description>
    <name>yarn.nodemanager.remote-app-log-dir</name>
    <value>/tmp/logs</value>
  </property>
  <property>
    <description>The remote log dir will be created at
    ${yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}
    </description>
    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
    <value>logs</value>
  </property>
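<!--
  Illustration (not a default): a sketch of enabling log aggregation so that
  finished applications' logs are collected into the remote log dir described
  above, with a retention of 7 days (604800 seconds):

  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>604800</value>
  </property>
-->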
  <property>
    <description>Amount of physical memory, in MB, that can be allocated
    for containers.</description>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>8192</value>
  </property>
  <property>
    <description>Whether physical memory limits will be enforced for
    containers.</description>
    <name>yarn.nodemanager.pmem-check-enabled</name>
    <value>true</value>
  </property>
  <property>
    <description>Whether virtual memory limits will be enforced for
    containers.</description>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>true</value>
  </property>
  <property>
    <description>Ratio of virtual memory to physical memory when
    setting memory limits for containers. Container allocations are
    expressed in terms of physical memory, and virtual memory usage
    is allowed to exceed this allocation by this ratio.
    </description>
    <name>yarn.nodemanager.vmem-pmem-ratio</name>
    <value>2.1</value>
  </property>
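<!--
  Worked example (assumes yarn.nodemanager.vmem-check-enabled=true): with the
  default vmem-pmem-ratio of 2.1, a container allocated 2048 MB of physical
  memory may use up to roughly 2048 * 2.1 = 4300 MB of virtual memory before
  the NodeManager kills it.
-->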
  <property>
    <description>Number of CPU cores that can be allocated
    for containers.</description>
    <name>yarn.nodemanager.resource.cpu-cores</name>
    <value>8</value>
  </property>
  <property>
    <description>Ratio of virtual cores to physical cores when
    allocating CPU resources to containers.
    </description>
    <name>yarn.nodemanager.vcores-pcores-ratio</name>
    <value>2</value>
  </property>
  <property>
    <description>NM Webapp address.</description>
    <name>yarn.nodemanager.webapp.address</name>
    <value>0.0.0.0:8042</value>
  </property>
  <property>
    <description>How often to monitor containers.</description>
    <name>yarn.nodemanager.container-monitor.interval-ms</name>
    <value>3000</value>
  </property>
  <property>
    <description>Class that calculates containers' current resource utilization.</description>
    <name>yarn.nodemanager.container-monitor.resource-calculator.class</name>
  </property>
  <property>
    <description>Frequency of running node health script.</description>
    <name>yarn.nodemanager.health-checker.interval-ms</name>
    <value>600000</value>
  </property>
  <property>
    <description>Script time out period.</description>
    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
    <value>1200000</value>
  </property>
  <property>
    <description>The health check script to run.</description>
    <name>yarn.nodemanager.health-checker.script.path</name>
    <value></value>
  </property>
  <property>
    <description>The arguments to pass to the health check script.</description>
    <name>yarn.nodemanager.health-checker.script.opts</name>
    <value></value>
  </property>
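<!--
  Illustration (not a default): pointing the NodeManager at a site-provided
  health script; the path and arguments below are placeholders. By convention,
  the node is reported unhealthy if the script prints a line starting with the
  word ERROR.

  <property>
    <name>yarn.nodemanager.health-checker.script.path</name>
    <value>/etc/hadoop/nm-health-check.sh</value>
  </property>
  <property>
    <name>yarn.nodemanager.health-checker.script.opts</name>
    <value>-v</value>
  </property>
-->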
  <property>
    <description>Frequency of running disk health checker code.</description>
    <name>yarn.nodemanager.disk-health-checker.interval-ms</name>
    <value>120000</value>
  </property>
  <property>
    <description>The minimum fraction of disks that must be healthy for the
    nodemanager to launch new containers. This corresponds to both
    yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs, i.e. if fewer
    healthy local-dirs (or log-dirs) are available than this fraction requires,
    new containers will not be launched on this node.</description>
    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
    <value>0.25</value>
  </property>
  <property>
    <description>The path to the Linux container executor.</description>
    <name>yarn.nodemanager.linux-container-executor.path</name>
  </property>
  <property>
    <description>The class which should help the LCE handle resources.</description>
    <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
    <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
    <!-- <value>org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler</value> -->
  </property>
  <property>
    <description>The cgroups hierarchy under which to place YARN processes (cannot contain commas).
    If yarn.nodemanager.linux-container-executor.cgroups.mount is false (that is, if cgroups have
    been pre-configured), then this cgroups hierarchy must already exist and be writable by the
    NodeManager user, otherwise the NodeManager may fail.
    Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler.</description>
    <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
    <value>/hadoop-yarn</value>
  </property>
  <property>
    <description>Whether the LCE should attempt to mount cgroups if not found.
    Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler.</description>
    <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
    <value>false</value>
  </property>
  <property>
    <description>Where the LCE should attempt to mount cgroups if not found. Common locations
    include /sys/fs/cgroup and /cgroup; the default location can vary depending on the Linux
    distribution in use. This path must exist before the NodeManager is launched.
    Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler, and
    yarn.nodemanager.linux-container-executor.cgroups.mount is true.</description>
    <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
  </property>
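<!--
  Illustration (not a default): a sketch of switching the NodeManager to the
  LinuxContainerExecutor with the cgroups resources handler, using the class
  names shown in the commented values above. The executor binary must also be
  installed and configured separately; no path is assumed here.

  <property>
    <name>yarn.nodemanager.container-executor.class</name>
    <value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>
  </property>
  <property>
    <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
    <value>org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler</value>
  </property>
-->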
  <property>
    <description>T-file compression types used to compress aggregated logs.</description>
    <name>yarn.nodemanager.log-aggregation.compression-type</name>
    <value>none</value>
  </property>
  <property>
    <description>The Kerberos principal for the node manager.</description>
    <name>yarn.nodemanager.principal</name>
    <value></value>
  </property>
  <property>
    <description>A comma-separated list of auxiliary services run by the NodeManager
    (for example, the MapReduce shuffle service).</description>
    <name>yarn.nodemanager.aux-services</name>
    <value></value>
    <!-- <value>mapreduce.shuffle</value> -->
  </property>
  <property>
    <description>No. of ms to wait between sending a SIGTERM and SIGKILL to a container</description>
    <name>yarn.nodemanager.sleep-delay-before-sigkill.ms</name>
    <value>250</value>
  </property>
  <property>
    <description>Max time to wait for a process to come up when trying to cleanup a container</description>
    <name>yarn.nodemanager.process-kill-wait.ms</name>
    <value>2000</value>
  </property>
  <property>
    <description>Max time, in seconds, to wait to establish a connection to RM when NM starts.
    The NM will shut down if it cannot connect to the RM within the specified max time period.
    If the value is set to -1, then the NM will retry forever.</description>
    <name>yarn.nodemanager.resourcemanager.connect.wait.secs</name>
    <value>900</value>
  </property>
  <property>
    <description>Time interval, in seconds, between each NM attempt to connect to RM.</description>
    <name>yarn.nodemanager.resourcemanager.connect.retry_interval.secs</name>
    <value>30</value>
  </property>

  <!-- Map Reduce configuration -->
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
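<!--
  Illustration (not a default): to run MapReduce on YARN, the shuffle auxiliary
  service is normally enabled on every NodeManager in yarn-site.xml, matching
  the commented mapreduce.shuffle value shown earlier:

  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce.shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
-->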
  <property>
    <name>mapreduce.job.jar</name>
    <value/>
  </property>
  <property>
    <name>mapreduce.job.hdfs-servers</name>
    <value>${fs.defaultFS}</value>
  </property>

  <!-- WebAppProxy Configuration -->
  <property>
    <description>The Kerberos principal for the proxy, if the proxy is not
    running as part of the RM.</description>
    <name>yarn.web-proxy.principal</name>
    <value/>
  </property>
  <property>
    <description>Keytab for WebAppProxy, if the proxy is not running as part of
    the RM.</description>
    <name>yarn.web-proxy.keytab</name>
  </property>
  <property>
    <description>The address for the web proxy as HOST:PORT, if this is not
    given then the proxy will run as part of the RM</description>
    <name>yarn.web-proxy.address</name>
    <value/>
  </property>

  <!-- Applications' Configuration -->
  <property>
    <description>CLASSPATH for YARN applications. A comma-separated list
    of CLASSPATH entries</description>
    <name>yarn.application.classpath</name>
    <value>$HADOOP_CONF_DIR,$HADOOP_COMMON_HOME/share/hadoop/common/*,$HADOOP_COMMON_HOME/share/hadoop/common/lib/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,$HADOOP_YARN_HOME/share/hadoop/yarn/*,$HADOOP_YARN_HOME/share/hadoop/yarn/lib/*</value>
  </property>

</configuration>