<html>
<body>
<table border="1">
<tr>
<th>name</th><th>value</th><th>description</th>
</tr>
<tr>
<td><a name="hadoop.tmp.dir">hadoop.tmp.dir</a></td><td>/tmp/hadoop-${user.name}</td><td>A base for other temporary directories.</td>
</tr>
<tr>
<td><a name="hadoop.native.lib">hadoop.native.lib</a></td><td>true</td><td>Should native hadoop libraries, if present, be used.</td>
</tr>
<tr>
<td><a name="hadoop.logfile.size">hadoop.logfile.size</a></td><td>10000000</td><td>The max size of each log file.</td>
</tr>
<tr>
<td><a name="hadoop.logfile.count">hadoop.logfile.count</a></td><td>10</td><td>The max number of log files.</td>
</tr>
<tr>
<td><a name="dfs.namenode.logging.level">dfs.namenode.logging.level</a></td><td>info</td><td>The logging level for dfs namenode. Other values are "dir" (trace
namespace mutations), "block" (trace block under/over replications and block
creations/deletions), or "all".</td>
</tr>
<tr>
<td><a name="io.sort.factor">io.sort.factor</a></td><td>10</td><td>The number of streams to merge at once while sorting
files. This determines the number of open file handles.</td>
</tr>
<tr>
<td><a name="io.sort.mb">io.sort.mb</a></td><td>100</td><td>The total amount of buffer memory to use while sorting
files, in megabytes. By default, gives each merge stream 1MB, which
should minimize seeks.</td>
</tr>
<tr>
<td><a name="io.file.buffer.size">io.file.buffer.size</a></td><td>4096</td><td>The size of buffer for use in sequence files.
The size of this buffer should probably be a multiple of hardware
page size (4096 on Intel x86), and it determines how much data is
buffered during read and write operations.</td>
</tr>
<tr>
<td><a name="io.bytes.per.checksum">io.bytes.per.checksum</a></td><td>512</td><td>The number of bytes per checksum. Must not be larger than
io.file.buffer.size.</td>
</tr>
<tr>
<td><a name="io.skip.checksum.errors">io.skip.checksum.errors</a></td><td>false</td><td>If true, when a checksum error is encountered while
reading a sequence file, entries are skipped, instead of throwing an
exception.</td>
</tr>
<tr>
<td><a name="io.map.index.skip">io.map.index.skip</a></td><td>0</td><td>Number of index entries to skip between each entry.
Zero by default. Setting this to values larger than zero can
facilitate opening large map files using less memory.</td>
</tr>
<tr>
<td><a name="io.compression.codecs">io.compression.codecs</a></td><td>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec</td><td>A list of the compression codec classes that can be used
for compression/decompression.</td>
</tr>
<tr>
<td><a name="fs.default.name">fs.default.name</a></td><td>file:///</td><td>The name of the default file system. A URI whose
scheme and authority determine the FileSystem implementation. The
uri's scheme determines the config property (fs.SCHEME.impl) naming
the FileSystem implementation class. The uri's authority is used to
determine the host, port, etc. for a filesystem.</td>
</tr>
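<tr>
<td colspan="3"><p>To illustrate the fs.SCHEME.impl convention described above, here is a
minimal Java sketch (not part of Hadoop itself) that resolves the implementation class
configured for the scheme of fs.default.name. It assumes hadoop-default.xml and any
hadoop-site.xml are on the classpath, which Configuration loads by default.</p>
<pre>
import java.net.URI;
import org.apache.hadoop.conf.Configuration;

public class SchemeLookup {
  public static void main(String[] args) {
    // Configuration reads hadoop-default.xml, then hadoop-site.xml.
    Configuration conf = new Configuration();
    URI uri = URI.create(conf.get("fs.default.name", "file:///"));
    String scheme = (uri.getScheme() == null) ? "file" : uri.getScheme();
    // e.g. "hdfs" maps to fs.hdfs.impl, whose value names the class
    // org.apache.hadoop.dfs.DistributedFileSystem.
    String implKey = "fs." + scheme + ".impl";
    System.out.println(implKey + " = " + conf.get(implKey));
  }
}
</pre></td>
</tr>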
<tr>
<td><a name="fs.trash.root">fs.trash.root</a></td><td>${hadoop.tmp.dir}/Trash</td><td>The trash directory, used by FsShell's 'rm' command.
</td>
</tr>
<tr>
<td><a name="fs.trash.interval">fs.trash.interval</a></td><td>0</td><td>Number of minutes between trash checkpoints.
If zero, the trash feature is disabled.
</td>
</tr>
<tr>
<td><a name="fs.file.impl">fs.file.impl</a></td><td>org.apache.hadoop.fs.LocalFileSystem</td><td>The FileSystem for file: uris.</td>
</tr>
<tr>
<td><a name="fs.hdfs.impl">fs.hdfs.impl</a></td><td>org.apache.hadoop.dfs.DistributedFileSystem</td><td>The FileSystem for hdfs: uris.</td>
</tr>
<tr>
<td><a name="fs.s3.impl">fs.s3.impl</a></td><td>org.apache.hadoop.fs.s3.S3FileSystem</td><td>The FileSystem for s3: uris.</td>
</tr>
<tr>
<td><a name="fs.kfs.impl">fs.kfs.impl</a></td><td>org.apache.hadoop.fs.kfs.KosmosFileSystem</td><td>The FileSystem for kfs: uris.</td>
</tr>
<tr>
<td><a name="fs.hftp.impl">fs.hftp.impl</a></td><td>org.apache.hadoop.dfs.HftpFileSystem</td><td>The FileSystem for hftp: uris.</td>
</tr>
<tr>
<td><a name="fs.ramfs.impl">fs.ramfs.impl</a></td><td>org.apache.hadoop.fs.InMemoryFileSystem</td><td>The FileSystem for ramfs: uris.</td>
</tr>
<tr>
<td><a name="fs.inmemory.size.mb">fs.inmemory.size.mb</a></td><td>75</td><td>The size of the in-memory filesystem instance in MB.</td>
</tr>
<tr>
<td><a name="fs.checkpoint.dir">fs.checkpoint.dir</a></td><td>${hadoop.tmp.dir}/dfs/namesecondary</td><td>Determines where on the local filesystem the DFS secondary
name node should store the temporary images and edits to merge.
</td>
</tr>
<tr>
<td><a name="fs.checkpoint.period">fs.checkpoint.period</a></td><td>3600</td><td>The number of seconds between two periodic checkpoints.
</td>
</tr>
<tr>
<td><a name="fs.checkpoint.size">fs.checkpoint.size</a></td><td>67108864</td><td>The size of the current edit log (in bytes) that triggers
a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
</td>
</tr>
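<tr>
<td colspan="3"><p>The two checkpoint properties above combine as an either/or trigger:
checkpoint when the period has elapsed or the edit log has outgrown the size threshold.
A hedged sketch of that rule (an illustrative helper, not the actual SecondaryNameNode
code):</p>
<pre>
import org.apache.hadoop.conf.Configuration;

public class CheckpointTrigger {
  // Checkpoint when the period has elapsed OR the edit log has
  // outgrown fs.checkpoint.size, whichever comes first.
  public static boolean shouldCheckpoint(Configuration conf,
                                         long secondsSinceLast,
                                         long editLogBytes) {
    long period = conf.getLong("fs.checkpoint.period", 3600);     // seconds
    long size   = conf.getLong("fs.checkpoint.size", 67108864L);  // bytes
    return secondsSinceLast >= period || editLogBytes >= size;
  }
}
</pre></td>
</tr>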
<tr>
<td><a name="dfs.secondary.http.bindAddress">dfs.secondary.http.bindAddress</a></td><td>0.0.0.0:50090</td><td>
The secondary namenode http server bind address and port.
If the port is 0 then the server will start on a free port.
</td>
</tr>
<tr>
<td><a name="dfs.datanode.bindAddress">dfs.datanode.bindAddress</a></td><td>0.0.0.0:50010</td><td>
The address that the datanode will listen on.
If the port is 0 then the server will start on a free port.
</td>
</tr>
<tr>
<td><a name="dfs.datanode.http.bindAddress">dfs.datanode.http.bindAddress</a></td><td>0.0.0.0:50075</td><td>
The datanode http server bind address and port.
If the port is 0 then the server will start on a free port.
</td>
</tr>
<tr>
<td><a name="dfs.http.bindAddress">dfs.http.bindAddress</a></td><td>0.0.0.0:50070</td><td>
The address and base port that the dfs namenode web ui will listen on.
If the port is 0 then the server will start on a free port.
</td>
</tr>
<tr>
<td><a name="dfs.datanode.dns.interface">dfs.datanode.dns.interface</a></td><td>default</td><td>The name of the network interface from which a data node should
report its IP address.
</td>
</tr>
<tr>
<td><a name="dfs.datanode.dns.nameserver">dfs.datanode.dns.nameserver</a></td><td>default</td><td>The host name or IP address of the name server (DNS)
which a DataNode should use to determine the host name used by the
NameNode for communication and display purposes.
</td>
</tr>
<tr>
<td><a name="dfs.replication.considerLoad">dfs.replication.considerLoad</a></td><td>true</td><td>Decides whether chooseTarget considers the target's load.
</td>
</tr>
<tr>
<td><a name="dfs.default.chunk.view.size">dfs.default.chunk.view.size</a></td><td>32768</td><td>The number of bytes of a file to view in the browser.
</td>
</tr>
<tr>
<td><a name="dfs.datanode.du.reserved">dfs.datanode.du.reserved</a></td><td>0</td><td>Reserved space in bytes per volume. Always leave this much space free for non-dfs use.
</td>
</tr>
<tr>
<td><a name="dfs.datanode.du.pct">dfs.datanode.du.pct</a></td><td>0.98f</td><td>When calculating remaining space, only use this percentage of the real available space.
</td>
</tr>
<tr>
<td><a name="dfs.name.dir">dfs.name.dir</a></td><td>${hadoop.tmp.dir}/dfs/name</td><td>Determines where on the local filesystem the DFS name node
should store the name table. If this is a comma-delimited list
of directories then the name table is replicated in all of the
directories, for redundancy.</td>
</tr>
<tr>
<td><a name="dfs.permissions">dfs.permissions</a></td><td>true</td><td>
If "true", enable permission checking in HDFS.
If "false", permission checking is turned off,
but all other behavior is unchanged.
Switching from one parameter value to the other does not change the mode,
owner or group of files or directories.
</td>
</tr>
<tr>
<td><a name="dfs.permissions.supergroup">dfs.permissions.supergroup</a></td><td>supergroup</td><td>The name of the group of super-users.</td>
</tr>
<tr>
<td><a name="dfs.client.buffer.dir">dfs.client.buffer.dir</a></td><td>${hadoop.tmp.dir}/dfs/tmp</td><td>Determines where on the local filesystem a DFS client
should store its blocks before it sends them to the datanode.
</td>
</tr>
<tr>
<td><a name="dfs.data.dir">dfs.data.dir</a></td><td>${hadoop.tmp.dir}/dfs/data</td><td>Determines where on the local filesystem a DFS data node
should store its blocks. If this is a comma-delimited
list of directories, then data will be stored in all named
directories, typically on different devices.
Directories that do not exist are ignored.
</td>
</tr>
<tr>
<td><a name="dfs.replication">dfs.replication</a></td><td>3</td><td>Default block replication.
The actual number of replications can be specified when the file is created.
The default is used if replication is not specified at create time.
</td>
</tr>
<tr>
<td><a name="dfs.replication.max">dfs.replication.max</a></td><td>512</td><td>Maximal block replication.
</td>
</tr>
<tr>
<td><a name="dfs.replication.min">dfs.replication.min</a></td><td>1</td><td>Minimal block replication.
</td>
</tr>
<tr>
<td><a name="dfs.block.size">dfs.block.size</a></td><td>67108864</td><td>The default block size for new files.</td>
</tr>
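<tr>
<td colspan="3"><p>dfs.replication and dfs.block.size are only defaults; both can be
overridden per file at create time, as the descriptions above note. A hedged Java
sketch (the path and sizes are illustrative):</p>
<pre>
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateWithOverrides {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // create(path, overwrite, bufferSize, replication, blockSize)
    FSDataOutputStream out = fs.create(new Path("/tmp/example"),
        true,                                     // overwrite
        conf.getInt("io.file.buffer.size", 4096),
        (short) 2,                                // replication for this file only
        128 * 1024 * 1024L);                      // 128MB blocks for this file only
    out.writeBytes("hello\n");
    out.close();
  }
}
</pre></td>
</tr>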
<tr>
<td><a name="dfs.df.interval">dfs.df.interval</a></td><td>60000</td><td>Disk usage statistics refresh interval in msec.</td>
</tr>
<tr>
<td><a name="dfs.client.block.write.retries">dfs.client.block.write.retries</a></td><td>3</td><td>The number of retries for writing blocks to the data nodes,
before we signal failure to the application.
</td>
</tr>
<tr>
<td><a name="dfs.blockreport.intervalMsec">dfs.blockreport.intervalMsec</a></td><td>3600000</td><td>Determines block reporting interval in milliseconds.</td>
</tr>
<tr>
<td><a name="dfs.heartbeat.interval">dfs.heartbeat.interval</a></td><td>3</td><td>Determines datanode heartbeat interval in seconds.</td>
</tr>
<tr>
<td><a name="dfs.namenode.handler.count">dfs.namenode.handler.count</a></td><td>10</td><td>The number of server threads for the namenode.</td>
</tr>
<tr>
<td><a name="dfs.safemode.threshold.pct">dfs.safemode.threshold.pct</a></td><td>0.999f</td><td>
Specifies the percentage of blocks that should satisfy
the minimal replication requirement defined by dfs.replication.min.
Values less than or equal to 0 mean not to start in safe mode.
Values greater than 1 will make safe mode permanent.
</td>
</tr>
<tr>
<td><a name="dfs.safemode.extension">dfs.safemode.extension</a></td><td>30000</td><td>
Determines extension of safe mode in milliseconds
after the threshold level is reached.
</td>
</tr>
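<tr>
<td colspan="3"><p>Read together, the two safe-mode properties above say: the namenode may
leave safe mode once the fraction of minimally replicated blocks reaches the threshold,
and even then it waits out the extension. A hedged sketch of that rule (illustrative
helper, not the actual namenode code):</p>
<pre>
import org.apache.hadoop.conf.Configuration;

public class SafeModeRule {
  // True once enough blocks meet dfs.replication.min and the
  // extension window after crossing the threshold has elapsed.
  public static boolean mayLeaveSafeMode(Configuration conf,
                                         long safeBlocks, long totalBlocks,
                                         long msSinceThresholdReached) {
    float threshold = conf.getFloat("dfs.safemode.threshold.pct", 0.999f);
    long extension  = conf.getLong("dfs.safemode.extension", 30000);
    float safeFraction =
        (totalBlocks == 0) ? 1.0f : (float) safeBlocks / totalBlocks;
    if (safeFraction >= threshold) {
      return msSinceThresholdReached >= extension;
    }
    return false;  // still below the threshold
  }
}
</pre></td>
</tr>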
<tr>
<td><a name="dfs.network.script">dfs.network.script</a></td><td></td><td>
Specifies a script name that prints the network location path
of the current machine.
</td>
</tr>
<tr>
<td><a name="dfs.balance.bandwidthPerSec">dfs.balance.bandwidthPerSec</a></td><td>1048576</td><td>
Specifies the maximum bandwidth, in bytes per second, that each
datanode can utilize for balancing.
</td>
</tr>
<tr>
<td><a name="dfs.hosts">dfs.hosts</a></td><td></td><td>Names a file that contains a list of hosts that are
permitted to connect to the namenode. The full pathname of the file
must be specified. If the value is empty, all hosts are
permitted.</td>
</tr>
<tr>
<td><a name="dfs.hosts.exclude">dfs.hosts.exclude</a></td><td></td><td>Names a file that contains a list of hosts that are
not permitted to connect to the namenode. The full pathname of the
file must be specified. If the value is empty, no hosts are
excluded.</td>
</tr>
<tr>
<td><a name="dfs.max.objects">dfs.max.objects</a></td><td>0</td><td>The maximum number of files, directories and blocks
dfs supports. A value of zero indicates no limit to the number
of objects that dfs supports.
</td>
</tr>
<tr>
<td><a name="fs.s3.block.size">fs.s3.block.size</a></td><td>67108864</td><td>Block size to use when writing files to S3.</td>
</tr>
<tr>
<td><a name="fs.s3.buffer.dir">fs.s3.buffer.dir</a></td><td>${hadoop.tmp.dir}/s3</td><td>Determines where on the local filesystem the S3 filesystem
should store its blocks before it sends them to S3
or after it retrieves them from S3.
</td>
</tr>
<tr>
<td><a name="fs.s3.maxRetries">fs.s3.maxRetries</a></td><td>4</td><td>The maximum number of retries for reading or writing blocks to S3,
before we signal failure to the application.
</td>
</tr>
<tr>
<td><a name="fs.s3.sleepTimeSeconds">fs.s3.sleepTimeSeconds</a></td><td>10</td><td>The number of seconds to sleep between each S3 retry.
</td>
</tr>
<tr>
<td><a name="mapred.job.tracker">mapred.job.tracker</a></td><td>local</td><td>The host and port that the MapReduce job tracker runs
at. If "local", then jobs are run in-process as a single map
and reduce task.
</td>
</tr>
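<tr>
<td colspan="3"><p>The "local" value allows quick single-process testing without a cluster.
A hedged sketch of a job skeleton that forces the local runner (the mapper, reducer
and paths are deliberately left out):</p>
<pre>
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class LocalRunnerSkeleton {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf();
    job.setJobName("local-test");
    // Run in-process as a single map and a single reduce task.
    job.set("mapred.job.tracker", "local");
    // Keep all I/O on the local filesystem as well.
    job.set("fs.default.name", "file:///");
    // ... set mapper, reducer, input and output paths here ...
    JobClient.runJob(job);
  }
}
</pre></td>
</tr>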
<tr>
<td><a name="mapred.job.tracker.http.bindAddress">mapred.job.tracker.http.bindAddress</a></td><td>0.0.0.0:50030</td><td>
The job tracker http server bind address and port.
If the port is 0 then the server will start on a free port.
</td>
</tr>
<tr>
<td><a name="mapred.job.tracker.handler.count">mapred.job.tracker.handler.count</a></td><td>10</td><td>
The number of server threads for the JobTracker. This should be roughly
4% of the number of tasktracker nodes.
</td>
</tr>
<tr>
<td><a name="mapred.task.tracker.report.bindAddress">mapred.task.tracker.report.bindAddress</a></td><td>127.0.0.1:0</td><td>The interface that task processes use to communicate
with their parent tasktracker process.</td>
</tr>
<tr>
<td><a name="mapred.local.dir">mapred.local.dir</a></td><td>${hadoop.tmp.dir}/mapred/local</td><td>The local directory where MapReduce stores intermediate
data files. May be a comma-separated list of
directories on different devices in order to spread disk i/o.
Directories that do not exist are ignored.
</td>
</tr>
<tr>
<td><a name="local.cache.size">local.cache.size</a></td><td>10737418240</td><td>The limit on the size of the cache to keep, 10GB by default.
This acts as a soft limit on the cache directory for out-of-band data.
</td>
</tr>
<tr>
<td><a name="mapred.system.dir">mapred.system.dir</a></td><td>${hadoop.tmp.dir}/mapred/system</td><td>The shared directory where MapReduce stores control files.
</td>
</tr>
<tr>
<td><a name="mapred.temp.dir">mapred.temp.dir</a></td><td>${hadoop.tmp.dir}/mapred/temp</td><td>A shared directory for temporary files.
</td>
</tr>
<tr>
<td><a name="mapred.local.dir.minspacestart">mapred.local.dir.minspacestart</a></td><td>0</td><td>If the space in mapred.local.dir drops under this,
do not ask for more tasks.
Value in bytes.
</td>
</tr>
<tr>
<td><a name="mapred.local.dir.minspacekill">mapred.local.dir.minspacekill</a></td><td>0</td><td>If the space in mapred.local.dir drops under this,
do not ask for more tasks until all the current ones have finished and
cleaned up. Also, to save the rest of the running tasks,
kill one of them to free some space. Start with the reduce tasks,
then go to the ones that have finished the least.
Value in bytes.
</td>
</tr>
<tr>
<td><a name="mapred.tasktracker.expiry.interval">mapred.tasktracker.expiry.interval</a></td><td>600000</td><td>Expert: The time-interval, in milliseconds, after which
a tasktracker is declared 'lost' if it doesn't send heartbeats.
</td>
</tr>
<tr>
<td><a name="mapred.map.tasks">mapred.map.tasks</a></td><td>2</td><td>The default number of map tasks per job. Typically set
to a prime several times greater than the number of available hosts.
Ignored when mapred.job.tracker is "local".
</td>
</tr>
<tr>
<td><a name="mapred.reduce.tasks">mapred.reduce.tasks</a></td><td>1</td><td>The default number of reduce tasks per job. Typically set
to a prime close to the number of available hosts. Ignored when
mapred.job.tracker is "local".
</td>
</tr>
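<tr>
<td colspan="3"><p>Per-job overrides of the two task-count properties above are usually set
programmatically. A hedged sketch following the sizing hints (the 11-host cluster is an
assumption):</p>
<pre>
import org.apache.hadoop.mapred.JobConf;

public class TaskCounts {
  public static void main(String[] args) {
    JobConf job = new JobConf();
    // For an assumed 11-host cluster: maps set to a prime several
    // times the host count (a hint only; input splits may override
    // it), reduces set to a prime close to the host count.
    job.setNumMapTasks(83);
    job.setNumReduceTasks(11);
    System.out.println(job.get("mapred.map.tasks") + " maps, "
        + job.get("mapred.reduce.tasks") + " reduces");
  }
}
</pre></td>
</tr>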
<tr>
<td><a name="mapred.map.max.attempts">mapred.map.max.attempts</a></td><td>4</td><td>Expert: The maximum number of attempts per map task.
In other words, the framework will try to execute a map task this many
times before giving up on it.
</td>
</tr>
<tr>
<td><a name="mapred.reduce.max.attempts">mapred.reduce.max.attempts</a></td><td>4</td><td>Expert: The maximum number of attempts per reduce task.
In other words, the framework will try to execute a reduce task this many
times before giving up on it.
</td>
</tr>
<tr>
<td><a name="mapred.reduce.parallel.copies">mapred.reduce.parallel.copies</a></td><td>5</td><td>The default number of parallel transfers run by reduce
during the copy (shuffle) phase.
</td>
</tr>
<tr>
<td><a name="mapred.reduce.copy.backoff">mapred.reduce.copy.backoff</a></td><td>300</td><td>The maximum amount of time (in seconds) a reducer spends on
fetching one map output before declaring it as failed.
</td>
</tr>
<tr>
<td><a name="mapred.task.timeout">mapred.task.timeout</a></td><td>600000</td><td>The number of milliseconds before a task will be
terminated if it neither reads an input, writes an output, nor
updates its status string.
</td>
</tr>
<tr>
<td><a name="mapred.tasktracker.map.tasks.maximum">mapred.tasktracker.map.tasks.maximum</a></td><td>2</td><td>The maximum number of map tasks that will be run
simultaneously by a task tracker.
</td>
</tr>
<tr>
<td><a name="mapred.tasktracker.reduce.tasks.maximum">mapred.tasktracker.reduce.tasks.maximum</a></td><td>2</td><td>The maximum number of reduce tasks that will be run
simultaneously by a task tracker.
</td>
</tr>
<tr>
<td><a name="mapred.jobtracker.completeuserjobs.maximum">mapred.jobtracker.completeuserjobs.maximum</a></td><td>100</td><td>The maximum number of complete jobs per user to keep around
before delegating them to the job history.
</td>
</tr>
<tr>
<td><a name="mapred.child.java.opts">mapred.child.java.opts</a></td><td>-Xmx200m</td><td>Java opts for the task tracker child processes.
The following symbol, if present, will be interpolated: @taskid@ is replaced
by the current TaskID. Any other occurrences of '@' will go unchanged.
For example, to enable verbose gc logging to a file named for the taskid in
/tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
</td>
</tr>
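<tr>
<td colspan="3"><p>A sketch of setting the verbose-gc example from the description above on a
single job; @taskid@ is interpolated by the tasktracker when it launches each child
JVM:</p>
<pre>
import org.apache.hadoop.mapred.JobConf;

public class ChildOpts {
  public static void main(String[] args) {
    JobConf job = new JobConf();
    // 1GB heap plus a per-task gc log, exactly as in the example value.
    job.set("mapred.child.java.opts",
            "-Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc");
  }
}
</pre></td>
</tr>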
<tr>
<td><a name="mapred.inmem.merge.threshold">mapred.inmem.merge.threshold</a></td><td>1000</td><td>The threshold, in terms of the number of files,
for the in-memory merge process. When we accumulate this many files
we initiate the in-memory merge and spill to disk. A value of zero or
less means there is no threshold, and the merge is triggered solely by
the ramfs's memory consumption.
</td>
</tr>
<tr>
<td><a name="mapred.map.tasks.speculative.execution">mapred.map.tasks.speculative.execution</a></td><td>true</td><td>If true, then multiple instances of some map tasks
may be executed in parallel.</td>
</tr>
<tr>
<td><a name="mapred.reduce.tasks.speculative.execution">mapred.reduce.tasks.speculative.execution</a></td><td>true</td><td>If true, then multiple instances of some reduce tasks
may be executed in parallel.</td>
</tr>
<tr>
<td><a name="mapred.min.split.size">mapred.min.split.size</a></td><td>0</td><td>The minimum size chunk that map input should be split
into. Note that some file formats may have minimum split sizes that
take priority over this setting.</td>
</tr>
<tr>
<td><a name="mapred.submit.replication">mapred.submit.replication</a></td><td>10</td><td>The replication level for submitted job files. This
should be around the square root of the number of nodes.
</td>
</tr>
<tr>
<td><a name="mapred.tasktracker.dns.interface">mapred.tasktracker.dns.interface</a></td><td>default</td><td>The name of the network interface from which a task
tracker should report its IP address.
</td>
</tr>
<tr>
<td><a name="mapred.tasktracker.dns.nameserver">mapred.tasktracker.dns.nameserver</a></td><td>default</td><td>The host name or IP address of the name server (DNS)
which a TaskTracker should use to determine the host name used by
the JobTracker for communication and display purposes.
</td>
</tr>
<tr>
<td><a name="tasktracker.http.threads">tasktracker.http.threads</a></td><td>40</td><td>The number of worker threads for the http server. This is
used for map output fetching.
</td>
</tr>
<tr>
<td><a name="mapred.task.tracker.http.bindAddress">mapred.task.tracker.http.bindAddress</a></td><td>0.0.0.0:50060</td><td>
The task tracker http server bind address and port.
If the port is 0 then the server will start on a free port.
</td>
</tr>
<tr>
<td><a name="keep.failed.task.files">keep.failed.task.files</a></td><td>false</td><td>Should the files for failed tasks be kept. This should only be
used on jobs that are failing, because the storage is never
reclaimed. It also prevents the map outputs from being erased
from the reduce directory as they are consumed.</td>
</tr>
<tr>
<td><a name="mapred.output.compress">mapred.output.compress</a></td><td>false</td><td>Should the job outputs be compressed?
</td>
</tr>
<tr>
<td><a name="mapred.output.compression.type">mapred.output.compression.type</a></td><td>RECORD</td><td>If the job outputs are to be compressed as SequenceFiles, how should
they be compressed? Should be one of NONE, RECORD or BLOCK.
</td>
</tr>
<tr>
<td><a name="mapred.output.compression.codec">mapred.output.compression.codec</a></td><td>org.apache.hadoop.io.compress.DefaultCodec</td><td>If the job outputs are compressed, how should they be compressed?
</td>
</tr>
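<tr>
<td colspan="3"><p>A hedged sketch combining the three job-output compression properties
above on one job (the switch to GzipCodec is illustrative; any codec listed in
io.compression.codecs works):</p>
<pre>
import org.apache.hadoop.mapred.JobConf;

public class OutputCompression {
  public static void main(String[] args) {
    JobConf job = new JobConf();
    job.setBoolean("mapred.output.compress", true);
    // One of NONE, RECORD or BLOCK; BLOCK usually compresses best.
    job.set("mapred.output.compression.type", "BLOCK");
    job.set("mapred.output.compression.codec",
            "org.apache.hadoop.io.compress.GzipCodec");
  }
}
</pre></td>
</tr>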
<tr>
<td><a name="mapred.compress.map.output">mapred.compress.map.output</a></td><td>false</td><td>Should the outputs of the maps be compressed before being
sent across the network? Uses SequenceFile compression.
</td>
</tr>
<tr>
<td><a name="mapred.map.output.compression.type">mapred.map.output.compression.type</a></td><td>RECORD</td><td>If the map outputs are to be compressed, how should they
be compressed? Should be one of NONE, RECORD or BLOCK.
</td>
</tr>
<tr>
<td><a name="mapred.map.output.compression.codec">mapred.map.output.compression.codec</a></td><td>org.apache.hadoop.io.compress.DefaultCodec</td><td>If the map outputs are compressed, how should they be
compressed?
</td>
</tr>
<tr>
<td><a name="io.seqfile.compress.blocksize">io.seqfile.compress.blocksize</a></td><td>1000000</td><td>The minimum block size for compression in block-compressed
SequenceFiles.
</td>
</tr>
<tr>
<td><a name="io.seqfile.lazydecompress">io.seqfile.lazydecompress</a></td><td>true</td><td>Should values of block-compressed SequenceFiles be decompressed
only when necessary?
</td>
</tr>
<tr>
<td><a name="io.seqfile.sorter.recordlimit">io.seqfile.sorter.recordlimit</a></td><td>1000000</td><td>The limit on the number of records to be kept in memory in a spill
in SequenceFile.Sorter.
</td>
</tr>
<tr>
<td><a name="io.seqfile.compression.type">io.seqfile.compression.type</a></td><td>RECORD</td><td>The default compression type for SequenceFile.Writer.
</td>
</tr>
<tr>
<td><a name="map.sort.class">map.sort.class</a></td><td>org.apache.hadoop.mapred.MergeSorter</td><td>The default sort class for sorting keys.
</td>
</tr>
<tr>
<td><a name="mapred.userlog.limit.kb">mapred.userlog.limit.kb</a></td><td>0</td><td>The maximum size of the user-logs of each task, in KB. 0 disables the cap.
</td>
</tr>
<tr>
<td><a name="mapred.userlog.retain.hours">mapred.userlog.retain.hours</a></td><td>24</td><td>The maximum time, in hours, for which the user-logs are to be
retained.
</td>
</tr>
<tr>
<td><a name="mapred.hosts">mapred.hosts</a></td><td></td><td>Names a file that contains the list of nodes that may
connect to the jobtracker. If the value is empty, all hosts are
permitted.</td>
</tr>
<tr>
<td><a name="mapred.hosts.exclude">mapred.hosts.exclude</a></td><td></td><td>Names a file that contains the list of hosts that
should be excluded by the jobtracker. If the value is empty, no
hosts are excluded.</td>
</tr>
<tr>
<td><a name="mapred.max.tracker.failures">mapred.max.tracker.failures</a></td><td>4</td><td>The number of task failures on a tasktracker of a given job
after which new tasks of that job aren't assigned to it.
</td>
</tr>
<tr>
<td><a name="jobclient.output.filter">jobclient.output.filter</a></td><td>FAILED</td><td>The filter for controlling the output of the task's userlogs sent
to the console of the JobClient.
The permissible options are: NONE, KILLED, FAILED, SUCCEEDED and
ALL.
</td>
</tr>
<tr>
<td><a name="ipc.client.timeout">ipc.client.timeout</a></td><td>60000</td><td>Defines the timeout for IPC calls in milliseconds.</td>
</tr>
<tr>
<td><a name="ipc.client.idlethreshold">ipc.client.idlethreshold</a></td><td>4000</td><td>Defines the threshold number of connections after which
connections will be inspected for idleness.
</td>
</tr>
<tr>
<td><a name="ipc.client.maxidletime">ipc.client.maxidletime</a></td><td>120000</td><td>Defines the maximum idle time for a connected client after
which it may be disconnected.
</td>
</tr>
<tr>
<td><a name="ipc.client.kill.max">ipc.client.kill.max</a></td><td>10</td><td>Defines the maximum number of clients to disconnect in one go.
</td>
</tr>
<tr>
<td><a name="ipc.client.connection.maxidletime">ipc.client.connection.maxidletime</a></td><td>1000</td><td>The maximum time after which a client will bring down the
connection to the server.
</td>
</tr>
<tr>
<td><a name="ipc.client.connect.max.retries">ipc.client.connect.max.retries</a></td><td>10</td><td>Indicates the number of retries a client will make to establish
a server connection.
</td>
</tr>
<tr>
<td><a name="ipc.server.listen.queue.size">ipc.server.listen.queue.size</a></td><td>128</td><td>Indicates the length of the listen queue for servers accepting
client connections.
</td>
</tr>
<tr>
<td><a name="job.end.retry.attempts">job.end.retry.attempts</a></td><td>0</td><td>Indicates how many times hadoop should attempt to contact the
notification URL.</td>
</tr>
<tr>
<td><a name="job.end.retry.interval">job.end.retry.interval</a></td><td>30000</td><td>Indicates the time in milliseconds between notification URL retry
calls.</td>
</tr>
<tr>
<td><a name="webinterface.private.actions">webinterface.private.actions</a></td><td>false</td><td>If set to true, the web interfaces of the JT and NN may contain
actions, such as kill job, delete file, etc., that should
not be exposed to the public. Enable this option only if the interfaces
are reachable solely by those who have the right authorization.
</td>
</tr>
<tr>
<td><a name="hadoop.rpc.socket.factory.class.default">hadoop.rpc.socket.factory.class.default</a></td><td>org.apache.hadoop.net.StandardSocketFactory</td><td>Default SocketFactory to use. This parameter is expected to be
formatted as "package.FactoryClassName".
</td>
</tr>
<tr>
<td><a name="hadoop.rpc.socket.factory.class.ClientProtocol">hadoop.rpc.socket.factory.class.ClientProtocol</a></td><td></td><td>SocketFactory to use to connect to a DFS. If null or empty, use
hadoop.rpc.socket.factory.class.default. This socket factory is also used by
DFSClient to create sockets to DataNodes.
</td>
</tr>
<tr>
<td><a name="hadoop.rpc.socket.factory.class.JobSubmissionProtocol">hadoop.rpc.socket.factory.class.JobSubmissionProtocol</a></td><td></td><td>SocketFactory to use to connect to a Map/Reduce master
(JobTracker). If null or empty, then use hadoop.rpc.socket.factory.class.default.
</td>
</tr>
<tr>
<td><a name="hadoop.socks.server">hadoop.socks.server</a></td><td></td><td>Address (host:port) of the SOCKS server to be used by the
SocksSocketFactory.
</td>
</tr>
</table>
</body>
</html>