hadoop-default.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Do not modify this file directly. Instead, copy entries that you -->
<!-- wish to modify from this file into hadoop-site.xml and change them -->
<!-- there. If hadoop-site.xml does not already exist, create it. -->
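
<!-- For illustration only (the value below is hypothetical): a minimal
hadoop-site.xml that overrides a single entry from this file might look like:

<configuration>
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property>
</configuration>

Entries not listed in hadoop-site.xml keep the defaults defined here. -->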

<configuration>

<!-- logging properties -->

<property>
  <name>hadoop.logfile.size</name>
  <value>10000000</value>
  <description>The max size of each log file</description>
</property>

<property>
  <name>hadoop.logfile.count</name>
  <value>10</value>
  <description>The max number of log files</description>
</property>

<property>
  <name>dfs.namenode.logging.level</name>
  <value>info</value>
  <description>The logging level for dfs namenode. Other values are
  "dir" (trace namespace mutations), "block" (trace block under/over
  replications and block creations/deletions), or "all".</description>
</property>

<!-- i/o properties -->

<property>
  <name>io.sort.factor</name>
  <value>10</value>
  <description>The number of streams to merge at once while sorting
  files. This determines the number of open file handles.</description>
</property>

<property>
  <name>io.sort.mb</name>
  <value>100</value>
  <description>The total amount of buffer memory to use while sorting
  files, in megabytes. By default, gives each merge stream 1MB, which
  should minimize seeks.</description>
</property>

<property>
  <name>io.file.buffer.size</name>
  <value>4096</value>
  <description>The size of buffer for use in sequence files.
  The size of this buffer should probably be a multiple of hardware
  page size (4096 on Intel x86), and it determines how much data is
  buffered during read and write operations.</description>
</property>

<property>
  <name>io.bytes.per.checksum</name>
  <value>512</value>
  <description>The number of bytes per checksum. Must not be larger than
  io.file.buffer.size.</description>
</property>

<property>
  <name>io.skip.checksum.errors</name>
  <value>false</value>
  <description>If true, when a checksum error is encountered while
  reading a sequence file, entries are skipped, instead of throwing an
  exception.</description>
</property>

<property>
  <name>io.map.index.skip</name>
  <value>0</value>
  <description>Number of index entries to skip between each entry.
  Zero by default. Setting this to values larger than zero can
  facilitate opening large map files using less memory.</description>
</property>

<!-- file system properties -->

<property>
  <name>fs.default.name</name>
  <value>local</value>
  <description>The name of the default file system. Either the
  literal string "local" or a host:port for DFS.</description>
</property>
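
<!-- Example (host and port are hypothetical): to point clients at a running
DFS instead of the local filesystem, a hadoop-site.xml override could be:

<property>
  <name>fs.default.name</name>
  <value>namenode.example.com:9000</value>
</property>
-->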

<property>
  <name>dfs.datanode.port</name>
  <value>50010</value>
  <description>The port number that the dfs datanode server uses as a
  starting point to look for a free port to listen on.
  </description>
</property>

<property>
  <name>dfs.info.port</name>
  <value>50070</value>
  <description>The base port number for the dfs namenode web ui.
  </description>
</property>

<property>
  <name>dfs.datanode.du.reserved</name>
  <value>0</value>
  <description>Reserved space in bytes. Always leave this much space free
  for non-dfs use.
  </description>
</property>

<property>
  <name>dfs.datanode.du.pct</name>
  <value>0.98f</value>
  <description>When calculating remaining space, only use this percentage
  of the real available space.
  </description>
</property>

<property>
  <name>dfs.name.dir</name>
  <value>/tmp/hadoop/dfs/name</value>
  <description>Determines where on the local filesystem the DFS name node
  should store the name table.</description>
</property>

<property>
  <name>dfs.data.dir</name>
  <value>/tmp/hadoop/dfs/data</value>
  <description>Determines where on the local filesystem a DFS data node
  should store its blocks. If this is a comma-delimited
  list of directories, then data will be stored in all named
  directories, typically on different devices.
  Directories that do not exist are ignored.
  </description>
</property>
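
<!-- Example (paths are hypothetical): spreading block storage across two
drives with a comma-delimited dfs.data.dir, as described above:

<property>
  <name>dfs.data.dir</name>
  <value>/disk1/hadoop/dfs/data,/disk2/hadoop/dfs/data</value>
</property>
-->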

<property>
  <name>dfs.replication</name>
  <value>3</value>
  <description>Default block replication.
  The actual number of replications can be specified when the file is
  created. The default is used if replication is not specified at create
  time.
  </description>
</property>

<property>
  <name>dfs.replication.max</name>
  <value>512</value>
  <description>Maximal block replication.
  </description>
</property>

<property>
  <name>dfs.replication.min</name>
  <value>1</value>
  <description>Minimal block replication.
  </description>
</property>

<property>
  <name>dfs.block.size</name>
  <value>67108864</value>
  <description>The default block size for new files.</description>
</property>

<property>
  <name>dfs.df.interval</name>
  <value>3000</value>
  <description>Disk usage statistics refresh interval in msec.</description>
</property>

<property>
  <name>dfs.client.block.write.retries</name>
  <value>3</value>
  <description>The number of retries for writing blocks to the data nodes,
  before we signal failure to the application.
  </description>
</property>

<!-- map/reduce properties -->

<property>
  <name>mapred.job.tracker</name>
  <value>local</value>
  <description>The host and port that the MapReduce job tracker runs
  at. If "local", then jobs are run in-process as a single map
  and reduce task.
  </description>
</property>
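
<!-- Example (host and port are hypothetical): to run jobs on a cluster
rather than in-process, point mapred.job.tracker at the job tracker:

<property>
  <name>mapred.job.tracker</name>
  <value>jobtracker.example.com:50020</value>
</property>
-->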

<property>
  <name>mapred.job.tracker.info.port</name>
  <value>50030</value>
  <description>The port that the MapReduce job tracker info webserver runs at.
  </description>
</property>

<property>
  <name>mapred.task.tracker.output.port</name>
  <value>50040</value>
  <description>The port number that the MapReduce task tracker output server
  uses as a starting point to look for a free port to listen on.
  </description>
</property>

<property>
  <name>mapred.task.tracker.report.port</name>
  <value>50050</value>
  <description>The port number that the MapReduce task tracker report server
  uses as a starting point to look for a free port to listen on.
  </description>
</property>

<property>
  <name>mapred.local.dir</name>
  <value>/tmp/hadoop/mapred/local</value>
  <description>The local directory where MapReduce stores intermediate
  data files. May be a comma-separated list of
  directories on different devices in order to spread disk i/o.
  Directories that do not exist are ignored.
  </description>
</property>

<property>
  <name>mapred.system.dir</name>
  <value>/tmp/hadoop/mapred/system</value>
  <description>The shared directory where MapReduce stores control files.
  </description>
</property>

<property>
  <name>mapred.temp.dir</name>
  <value>/tmp/hadoop/mapred/temp</value>
  <description>A shared directory for temporary files.
  </description>
</property>

<property>
  <name>mapred.local.dir.minspacestart</name>
  <value>0</value>
  <description>If the space in mapred.local.dir drops under this,
  do not ask for more tasks.
  Value in bytes.
  </description>
</property>

<property>
  <name>mapred.local.dir.minspacekill</name>
  <value>0</value>
  <description>If the space in mapred.local.dir drops under this,
  do not ask for more tasks until all the current ones have finished and
  cleaned up. Also, to save the rest of the tasks we have running,
  kill one of them, to clean up some space. Start with the reduce tasks,
  then go with the ones that have finished the least.
  Value in bytes.
  </description>
</property>

<property>
  <name>mapred.map.tasks</name>
  <value>2</value>
  <description>The default number of map tasks per job. Typically set
  to a prime several times greater than the number of available hosts.
  Ignored when mapred.job.tracker is "local".
  </description>
</property>

<property>
  <name>mapred.reduce.tasks</name>
  <value>1</value>
  <description>The default number of reduce tasks per job. Typically set
  to a prime close to the number of available hosts. Ignored when
  mapred.job.tracker is "local".
  </description>
</property>

<property>
  <name>mapred.reduce.parallel.copies</name>
  <value>5</value>
  <description>The default number of parallel transfers run by reduce
  during the copy (shuffle) phase.
  </description>
</property>

<property>
  <name>mapred.task.timeout</name>
  <value>600000</value>
  <description>The number of milliseconds before a task will be
  terminated if it neither reads an input, writes an output, nor
  updates its status string.
  </description>
</property>

<property>
  <name>mapred.tasktracker.tasks.maximum</name>
  <value>2</value>
  <description>The maximum number of tasks that will be run
  simultaneously by a task tracker.
  </description>
</property>

<property>
  <name>mapred.child.java.opts</name>
  <value>-Xmx200m</value>
  <description>Java opts for the task tracker child processes. Subsumes
  'mapred.child.heap.size' (if a mapred.child.heap.size value is found
  in a configuration, its maximum heap size will be used and a warning
  emitted that heap.size has been deprecated). Also, the following symbols,
  if present, will be interpolated: @taskid@ is replaced by the current
  TaskID, and @port@ will be replaced by mapred.task.tracker.report.port + 1
  (a second child will fail with a port-in-use error if
  mapred.tasktracker.tasks.maximum is greater than one). Any other
  occurrences of '@' will go unchanged. For example, to enable verbose gc
  logging to a file named for the taskid in /tmp and to set the heap
  maximum to be a gigabyte, pass a 'value' of:
        -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
  </description>
</property>
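
<!-- The gc-logging example from the description above, written out as a
complete hadoop-site.xml override (the value is taken verbatim from the
description; @taskid@ is interpolated per task as explained there):

<property>
  <name>mapred.child.java.opts</name>
  <value>-Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc</value>
</property>
-->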

<property>
  <name>mapred.combine.buffer.size</name>
  <value>100000</value>
  <description>The number of entries the combining collector caches before
  combining them and writing to disk.</description>
</property>

<property>
  <name>mapred.speculative.execution</name>
  <value>true</value>
  <description>If true, then multiple instances of some map tasks may
  be executed in parallel.</description>
</property>

<property>
  <name>mapred.min.split.size</name>
  <value>0</value>
  <description>The minimum size chunk that map input should be split
  into. Note that some file formats may have minimum split sizes that
  take priority over this setting.</description>
</property>

<property>
  <name>mapred.submit.replication</name>
  <value>10</value>
  <description>The replication level for submitted job files. This
  should be around the square root of the number of nodes.
  </description>
</property>
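
<!-- Sanity check on the guidance above: the default of 10 matches a cluster
of roughly 100 nodes, since sqrt(100) = 10; a 25-node cluster would suggest a
value around 5. -->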

<property>
  <name>tasktracker.http.threads</name>
  <value>40</value>
  <description>The number of worker threads for the http server. This is
  used for map output fetching.
  </description>
</property>

<property>
  <name>tasktracker.http.port</name>
  <value>50060</value>
  <description>The default port for task trackers to use as their http
  server.
  </description>
</property>

<!-- ipc properties -->

<property>
  <name>ipc.client.timeout</name>
  <value>60000</value>
  <description>Defines the timeout for IPC calls in milliseconds.</description>
</property>

<property>
  <name>ipc.client.idlethreshold</name>
  <value>4000</value>
  <description>Defines the threshold number of connections after which
  connections will be inspected for idleness.
  </description>
</property>

<property>
  <name>ipc.client.maxidletime</name>
  <value>120000</value>
  <description>Defines the maximum idle time for a connected client after
  which it may be disconnected.
  </description>
</property>

<property>
  <name>ipc.client.kill.max</name>
  <value>10</value>
  <description>Defines the maximum number of clients to disconnect in one go.
  </description>
</property>

</configuration>