- <html>
- <body>
- <table border="1">
- <tr>
- <td>name</td><td>value</td><td>description</td>
- </tr>
- <tr>
- <td><a name="hadoop.tmp.dir">hadoop.tmp.dir</a></td><td>/tmp/hadoop-${user.name}</td><td>A base for other temporary directories.</td>
- </tr>
- <tr>
- <td><a name="hadoop.native.lib">hadoop.native.lib</a></td><td>true</td><td>Should native hadoop libraries, if present, be used.</td>
- </tr>
- <tr>
- <td><a name="hadoop.http.filter.initializers">hadoop.http.filter.initializers</a></td><td></td><td>A comma separated list of class names. Each class in the list
- must extend org.apache.hadoop.http.FilterInitializer. The corresponding
- Filter will be initialized. Then, the Filter will be applied to all user
- facing jsp and servlet web pages. The ordering of the list defines the
- ordering of the filters.</td>
- </tr>
- <tr>
- <td><a name="hadoop.logfile.size">hadoop.logfile.size</a></td><td>10000000</td><td>The max size of each log file</td>
- </tr>
- <tr>
- <td><a name="hadoop.logfile.count">hadoop.logfile.count</a></td><td>10</td><td>The max number of log files</td>
- </tr>
- <tr>
- <td><a name="hadoop.job.history.location">hadoop.job.history.location</a></td><td></td><td> If job tracker is static the history files are stored
- in this single well known place. If No value is set here, by default,
- it is in the local file system at ${hadoop.log.dir}/history.
- </td>
- </tr>
- <tr>
- <td><a name="hadoop.job.history.user.location">hadoop.job.history.user.location</a></td><td></td><td> User can specify a location to store the history files of
- a particular job. If nothing is specified, the logs are stored in
- output directory. The files are stored in "_logs/history/" in the directory.
- User can stop logging by giving the value "none".
- </td>
- </tr>
- <tr>
- <td><a name="dfs.namenode.logging.level">dfs.namenode.logging.level</a></td><td>info</td><td>The logging level for dfs namenode. Other values are "dir"(trac
- e namespace mutations), "block"(trace block under/over replications and block
- creations/deletions), or "all".</td>
- </tr>
- <tr>
- <td><a name="io.sort.factor">io.sort.factor</a></td><td>10</td><td>The number of streams to merge at once while sorting
- files. This determines the number of open file handles.</td>
- </tr>
- <tr>
- <td><a name="io.sort.mb">io.sort.mb</a></td><td>100</td><td>The total amount of buffer memory to use while sorting
- files, in megabytes. By default, gives each merge stream 1MB, which
- should minimize seeks.</td>
- </tr>
- <tr>
- <td><a name="io.sort.record.percent">io.sort.record.percent</a></td><td>0.05</td><td>The percentage of io.sort.mb dedicated to tracking record
- boundaries. Let this value be r, io.sort.mb be x. The maximum number
- of records collected before the collection thread must block is equal
- to (r * x) / 4</td>
- </tr>
- <tr>
- <td><a name="io.sort.spill.percent">io.sort.spill.percent</a></td><td>0.80</td><td>The soft limit in either the buffer or record collection
- buffers. Once reached, a thread will begin to spill the contents to disk
- in the background. Note that this does not imply any chunking of data to
- the spill. A value less than 0.5 is not recommended.</td>
- </tr>
- <tr>
- <td><a name="io.file.buffer.size">io.file.buffer.size</a></td><td>4096</td><td>The size of buffer for use in sequence files.
- The size of this buffer should probably be a multiple of hardware
- page size (4096 on Intel x86), and it determines how much data is
- buffered during read and write operations.</td>
- </tr>
- <tr>
- <td><a name="io.bytes.per.checksum">io.bytes.per.checksum</a></td><td>512</td><td>The number of bytes per checksum. Must not be larger than
- io.file.buffer.size.</td>
- </tr>
- <tr>
- <td><a name="io.skip.checksum.errors">io.skip.checksum.errors</a></td><td>false</td><td>If true, when a checksum error is encountered while
- reading a sequence file, entries are skipped, instead of throwing an
- exception.</td>
- </tr>
- <tr>
- <td><a name="io.map.index.skip">io.map.index.skip</a></td><td>0</td><td>Number of index entries to skip between each entry.
- Zero by default. Setting this to values larger than zero can
- facilitate opening large map files using less memory.</td>
- </tr>
- <tr>
- <td><a name="io.compression.codecs">io.compression.codecs</a></td><td>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec</td><td>A list of the compression codec classes that can be used
- for compression/decompression.</td>
- </tr>
- <tr>
- <td><a name="io.serializations">io.serializations</a></td><td>org.apache.hadoop.io.serializer.WritableSerialization</td><td>A list of serialization classes that can be used for
- obtaining serializers and deserializers.</td>
- </tr>
- <tr>
- <td><a name="fs.default.name">fs.default.name</a></td><td>file:///</td><td>The name of the default file system. A URI whose
- scheme and authority determine the FileSystem implementation. The
- uri's scheme determines the config property (fs.SCHEME.impl) naming
- the FileSystem implementation class. The uri's authority is used to
- determine the host, port, etc. for a filesystem.</td>
- </tr>
- <tr>
- <td><a name="fs.trash.interval">fs.trash.interval</a></td><td>0</td><td>Number of minutes between trash checkpoints.
- If zero, the trash feature is disabled.
- </td>
- </tr>
- <tr>
- <td><a name="fs.file.impl">fs.file.impl</a></td><td>org.apache.hadoop.fs.LocalFileSystem</td><td>The FileSystem for file: uris.</td>
- </tr>
- <tr>
- <td><a name="fs.hdfs.impl">fs.hdfs.impl</a></td><td>org.apache.hadoop.hdfs.DistributedFileSystem</td><td>The FileSystem for hdfs: uris.</td>
- </tr>
- <tr>
- <td><a name="fs.s3.impl">fs.s3.impl</a></td><td>org.apache.hadoop.fs.s3.S3FileSystem</td><td>The FileSystem for s3: uris.</td>
- </tr>
- <tr>
- <td><a name="fs.s3n.impl">fs.s3n.impl</a></td><td>org.apache.hadoop.fs.s3native.NativeS3FileSystem</td><td>The FileSystem for s3n: (Native S3) uris.</td>
- </tr>
- <tr>
- <td><a name="fs.kfs.impl">fs.kfs.impl</a></td><td>org.apache.hadoop.fs.kfs.KosmosFileSystem</td><td>The FileSystem for kfs: uris.</td>
- </tr>
- <tr>
- <td><a name="fs.hftp.impl">fs.hftp.impl</a></td><td>org.apache.hadoop.hdfs.HftpFileSystem</td><td></td>
- </tr>
- <tr>
- <td><a name="fs.hsftp.impl">fs.hsftp.impl</a></td><td>org.apache.hadoop.hdfs.HsftpFileSystem</td><td></td>
- </tr>
- <tr>
- <td><a name="fs.ftp.impl">fs.ftp.impl</a></td><td>org.apache.hadoop.fs.ftp.FTPFileSystem</td><td>The FileSystem for ftp: uris.</td>
- </tr>
- <tr>
- <td><a name="fs.ramfs.impl">fs.ramfs.impl</a></td><td>org.apache.hadoop.fs.InMemoryFileSystem</td><td>The FileSystem for ramfs: uris.</td>
- </tr>
- <tr>
- <td><a name="fs.har.impl">fs.har.impl</a></td><td>org.apache.hadoop.fs.HarFileSystem</td><td>The filesystem for Hadoop archives. </td>
- </tr>
- <tr>
- <td><a name="fs.checkpoint.dir">fs.checkpoint.dir</a></td><td>${hadoop.tmp.dir}/dfs/namesecondary</td><td>Determines where on the local filesystem the DFS secondary
- name node should store the temporary images to merge.
- If this is a comma-delimited list of directories then the image is
- replicated in all of the directories for redundancy.
- </td>
- </tr>
- <tr>
- <td><a name="fs.checkpoint.edits.dir">fs.checkpoint.edits.dir</a></td><td>${fs.checkpoint.dir}</td><td>Determines where on the local filesystem the DFS secondary
- name node should store the temporary edits to merge.
- If this is a comma-delimited list of directoires then teh edits is
- replicated in all of the directoires for redundancy.
- Default value is same as fs.checkpoint.dir
- </td>
- </tr>
- <tr>
- <td><a name="fs.checkpoint.period">fs.checkpoint.period</a></td><td>3600</td><td>The number of seconds between two periodic checkpoints.
- </td>
- </tr>
- <tr>
- <td><a name="fs.checkpoint.size">fs.checkpoint.size</a></td><td>67108864</td><td>The size of the current edit log (in bytes) that triggers
- a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.secondary.http.address">dfs.secondary.http.address</a></td><td>0.0.0.0:50090</td><td>
- The secondary namenode http server address and port.
- If the port is 0 then the server will start on a free port.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.datanode.address">dfs.datanode.address</a></td><td>0.0.0.0:50010</td><td>
- The address on which the datanode server will listen.
- If the port is 0 then the server will start on a free port.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.datanode.http.address">dfs.datanode.http.address</a></td><td>0.0.0.0:50075</td><td>
- The datanode http server address and port.
- If the port is 0 then the server will start on a free port.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.datanode.ipc.address">dfs.datanode.ipc.address</a></td><td>0.0.0.0:50020</td><td>
- The datanode ipc server address and port.
- If the port is 0 then the server will start on a free port.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.datanode.handler.count">dfs.datanode.handler.count</a></td><td>3</td><td>The number of server threads for the datanode.</td>
- </tr>
- <tr>
- <td><a name="dfs.http.address">dfs.http.address</a></td><td>0.0.0.0:50070</td><td>
- The address and the base port where the dfs namenode web ui will listen on.
- If the port is 0 then the server will start on a free port.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.datanode.https.address">dfs.datanode.https.address</a></td><td>0.0.0.0:50475</td><td></td>
- </tr>
- <tr>
- <td><a name="dfs.https.address">dfs.https.address</a></td><td>0.0.0.0:50470</td><td></td>
- </tr>
- <tr>
- <td><a name="https.keystore.info.rsrc">https.keystore.info.rsrc</a></td><td>sslinfo.xml</td><td>The name of the resource from which ssl keystore information
- will be extracted
- </td>
- </tr>
- <tr>
- <td><a name="dfs.datanode.dns.interface">dfs.datanode.dns.interface</a></td><td>default</td><td>The name of the Network Interface from which a data node should
- report its IP address.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.datanode.dns.nameserver">dfs.datanode.dns.nameserver</a></td><td>default</td><td>The host name or IP address of the name server (DNS)
- which a DataNode should use to determine the host name used by the
- NameNode for communication and display purposes.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.replication.considerLoad">dfs.replication.considerLoad</a></td><td>true</td><td>Decide if chooseTarget considers the target's load or not
- </td>
- </tr>
- <tr>
- <td><a name="dfs.default.chunk.view.size">dfs.default.chunk.view.size</a></td><td>32768</td><td>The number of bytes to view for a file on the browser.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.datanode.du.reserved">dfs.datanode.du.reserved</a></td><td>0</td><td>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.datanode.du.pct">dfs.datanode.du.pct</a></td><td>0.98f</td><td>When calculating remaining space, only use this percentage of the real available space
- </td>
- </tr>
- <tr>
- <td><a name="dfs.name.dir">dfs.name.dir</a></td><td>${hadoop.tmp.dir}/dfs/name</td><td>Determines where on the local filesystem the DFS name node
- should store the name table(fsimage). If this is a comma-delimited list
- of directories then the name table is replicated in all of the
- directories, for redundancy. </td>
- </tr>
- <tr>
- <td><a name="dfs.name.edits.dir">dfs.name.edits.dir</a></td><td>${dfs.name.dir}</td><td>Determines where on the local filesystem the DFS name node
- should store the transaction (edits) file. If this is a comma-delimited list
- of directories then the transaction file is replicated in all of the
- directories, for redundancy. Default value is same as dfs.name.dir
- </td>
- </tr>
- <tr>
- <td><a name="dfs.web.ugi">dfs.web.ugi</a></td><td>webuser,webgroup</td><td>The user account used by the web interface.
- Syntax: USERNAME,GROUP1,GROUP2, ...
- </td>
- </tr>
- <tr>
- <td><a name="dfs.permissions">dfs.permissions</a></td><td>true</td><td>
- If "true", enable permission checking in HDFS.
- If "false", permission checking is turned off,
- but all other behavior is unchanged.
- Switching from one parameter value to the other does not change the mode,
- owner or group of files or directories.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.permissions.supergroup">dfs.permissions.supergroup</a></td><td>supergroup</td><td>The name of the group of super-users.</td>
- </tr>
- <tr>
- <td><a name="dfs.data.dir">dfs.data.dir</a></td><td>${hadoop.tmp.dir}/dfs/data</td><td>Determines where on the local filesystem an DFS data node
- should store its blocks. If this is a comma-delimited
- list of directories, then data will be stored in all named
- directories, typically on different devices.
- Directories that do not exist are ignored.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.replication">dfs.replication</a></td><td>3</td><td>Default block replication.
- The actual number of replications can be specified when the file is created.
- The default is used if replication is not specified at create time.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.replication.max">dfs.replication.max</a></td><td>512</td><td>Maximal block replication.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.replication.min">dfs.replication.min</a></td><td>1</td><td>Minimal block replication.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.block.size">dfs.block.size</a></td><td>67108864</td><td>The default block size for new files.</td>
- </tr>
- <tr>
- <td><a name="dfs.df.interval">dfs.df.interval</a></td><td>60000</td><td>Disk usage statistics refresh interval in msec.</td>
- </tr>
- <tr>
- <td><a name="dfs.client.block.write.retries">dfs.client.block.write.retries</a></td><td>3</td><td>The number of retries for writing blocks to the data nodes,
- before we signal failure to the application.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.blockreport.intervalMsec">dfs.blockreport.intervalMsec</a></td><td>3600000</td><td>Determines block reporting interval in milliseconds.</td>
- </tr>
- <tr>
- <td><a name="dfs.blockreport.initialDelay">dfs.blockreport.initialDelay</a></td><td>0</td><td>Delay for first block report in seconds.</td>
- </tr>
- <tr>
- <td><a name="dfs.heartbeat.interval">dfs.heartbeat.interval</a></td><td>3</td><td>Determines datanode heartbeat interval in seconds.</td>
- </tr>
- <tr>
- <td><a name="dfs.namenode.handler.count">dfs.namenode.handler.count</a></td><td>10</td><td>The number of server threads for the namenode.</td>
- </tr>
- <tr>
- <td><a name="dfs.safemode.threshold.pct">dfs.safemode.threshold.pct</a></td><td>0.999f</td><td>
- Specifies the percentage of blocks that should satisfy
- the minimal replication requirement defined by dfs.replication.min.
- Values less than or equal to 0 mean not to start in safe mode.
- Values greater than 1 will make safe mode permanent.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.safemode.extension">dfs.safemode.extension</a></td><td>30000</td><td>
- Determines extension of safe mode in milliseconds
- after the threshold level is reached.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.balance.bandwidthPerSec">dfs.balance.bandwidthPerSec</a></td><td>1048576</td><td>
- Specifies the maximum amount of bandwidth that each datanode
- can utilize for balancing purposes, in terms of
- the number of bytes per second.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.hosts">dfs.hosts</a></td><td></td><td>Names a file that contains a list of hosts that are
- permitted to connect to the namenode. The full pathname of the file
- must be specified. If the value is empty, all hosts are
- permitted.</td>
- </tr>
- <tr>
- <td><a name="dfs.hosts.exclude">dfs.hosts.exclude</a></td><td></td><td>Names a file that contains a list of hosts that are
- not permitted to connect to the namenode. The full pathname of the
- file must be specified. If the value is empty, no hosts are
- excluded.</td>
- </tr>
- <tr>
- <td><a name="dfs.max.objects">dfs.max.objects</a></td><td>0</td><td>The maximum number of files, directories and blocks
- dfs supports. A value of zero indicates no limit to the number
- of objects that dfs supports.
- </td>
- </tr>
- <tr>
- <td><a name="dfs.namenode.decommission.interval">dfs.namenode.decommission.interval</a></td><td>300</td><td>Namenode periodicity in seconds to check if decommission is
- complete.</td>
- </tr>
- <tr>
- <td><a name="dfs.replication.interval">dfs.replication.interval</a></td><td>3</td><td>The periodicity in seconds with which the namenode computes
- repliaction work for datanodes. </td>
- </tr>
- <tr>
- <td><a name="dfs.access.time.precision">dfs.access.time.precision</a></td><td>3600000</td><td>The access time for HDFS file is precise upto this value.
- The default value is 1 hour. Setting a value of 0 disables
- access times for HDFS.
- </td>
- </tr>
- <tr>
- <td><a name="fs.s3.block.size">fs.s3.block.size</a></td><td>67108864</td><td>Block size to use when writing files to S3.</td>
- </tr>
- <tr>
- <td><a name="fs.s3.buffer.dir">fs.s3.buffer.dir</a></td><td>${hadoop.tmp.dir}/s3</td><td>Determines where on the local filesystem the S3 filesystem
- should store files before sending them to S3
- (or after retrieving them from S3).
- </td>
- </tr>
- <tr>
- <td><a name="fs.s3.maxRetries">fs.s3.maxRetries</a></td><td>4</td><td>The maximum number of retries for reading or writing files to S3,
- before we signal failure to the application.
- </td>
- </tr>
- <tr>
- <td><a name="fs.s3.sleepTimeSeconds">fs.s3.sleepTimeSeconds</a></td><td>10</td><td>The number of seconds to sleep between each S3 retry.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.job.tracker">mapred.job.tracker</a></td><td>local</td><td>The host and port that the MapReduce job tracker runs
- at. If "local", then jobs are run in-process as a single map
- and reduce task.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.job.tracker.http.address">mapred.job.tracker.http.address</a></td><td>0.0.0.0:50030</td><td>
- The job tracker http server address and port the server will listen on.
- If the port is 0 then the server will start on a free port.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.job.tracker.handler.count">mapred.job.tracker.handler.count</a></td><td>10</td><td>
- The number of server threads for the JobTracker. This should be roughly
- 4% of the number of tasktracker nodes.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.task.tracker.report.address">mapred.task.tracker.report.address</a></td><td>127.0.0.1:0</td><td>The interface and port that task tracker server listens on.
- Since it is only connected to by the tasks, it uses the local interface.
- EXPERT ONLY. Should only be changed if your host does not have the loopback
- interface.</td>
- </tr>
- <tr>
- <td><a name="mapred.local.dir">mapred.local.dir</a></td><td>${hadoop.tmp.dir}/mapred/local</td><td>The local directory where MapReduce stores intermediate
- data files. May be a comma-separated list of
- directories on different devices in order to spread disk i/o.
- Directories that do not exist are ignored.
- </td>
- </tr>
- <tr>
- <td><a name="local.cache.size">local.cache.size</a></td><td>10737418240</td><td>The limit on the size of cache you want to keep, set by default
- to 10GB. This will act as a soft limit on the cache directory for out of band data.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.system.dir">mapred.system.dir</a></td><td>${hadoop.tmp.dir}/mapred/system</td><td>The shared directory where MapReduce stores control files.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.temp.dir">mapred.temp.dir</a></td><td>${hadoop.tmp.dir}/mapred/temp</td><td>A shared directory for temporary files.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.local.dir.minspacestart">mapred.local.dir.minspacestart</a></td><td>0</td><td>If the space in mapred.local.dir drops under this,
- do not ask for more tasks.
- Value in bytes.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.local.dir.minspacekill">mapred.local.dir.minspacekill</a></td><td>0</td><td>If the space in mapred.local.dir drops under this,
- do not ask for more tasks until all the current ones have finished and
- cleaned up. Also, to save the rest of the tasks we have running,
- kill one of them, to clean up some space. Start with the reduce tasks,
- then go with the ones that have finished the least.
- Value in bytes.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.tasktracker.expiry.interval">mapred.tasktracker.expiry.interval</a></td><td>600000</td><td>Expert: The time-interval, in miliseconds, after which
- a tasktracker is declared 'lost' if it doesn't send heartbeats.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.tasktracker.instrumentation">mapred.tasktracker.instrumentation</a></td><td>org.apache.hadoop.mapred.TaskTrackerMetricsInst</td><td>Expert: The instrumentation class to associate with each TaskTracker.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.tasktracker.taskmemorymanager.monitoring-interval">mapred.tasktracker.taskmemorymanager.monitoring-interval</a></td><td>5000</td><td>The interval, in milliseconds, for which the tasktracker waits
- between two cycles of monitoring its tasks' memory usage. Used only if
- tasks' memory management is enabled via mapred.tasktracker.tasks.maxmemory.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.tasktracker.procfsbasedprocesstree.sleeptime-before-sigkill">mapred.tasktracker.procfsbasedprocesstree.sleeptime-before-sigkill</a></td><td>5000</td><td>The time, in milliseconds, the tasktracker waits for sending a
- SIGKILL to a process that has overrun memory limits, after it has been sent
- a SIGTERM. Used only if tasks' memory management is enabled via
- mapred.tasktracker.tasks.maxmemory.</td>
- </tr>
- <tr>
- <td><a name="mapred.map.tasks">mapred.map.tasks</a></td><td>2</td><td>The default number of map tasks per job. Typically set
- to a prime several times greater than the number of available hosts.
- Ignored when mapred.job.tracker is "local".
- </td>
- </tr>
- <tr>
- <td><a name="mapred.reduce.tasks">mapred.reduce.tasks</a></td><td>1</td><td>The default number of reduce tasks per job. Typically set
- to a prime close to the number of available hosts. Ignored when
- mapred.job.tracker is "local".
- </td>
- </tr>
- <tr>
- <td><a name="mapred.jobtracker.restart.recover">mapred.jobtracker.restart.recover</a></td><td>false</td><td>"true" to enable (job) recovery upon restart,
- "false" to start afresh
- </td>
- </tr>
- <tr>
- <td><a name="mapred.jobtracker.job.history.block.size">mapred.jobtracker.job.history.block.size</a></td><td>3145728</td><td>The block size of the job history file. Since the job recovery
- uses job history, it's important to dump job history to disk as
- soon as possible. Note that this is an expert level parameter.
- The default value is set to 3 MB.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.jobtracker.taskScheduler">mapred.jobtracker.taskScheduler</a></td><td>org.apache.hadoop.mapred.JobQueueTaskScheduler</td><td>The class responsible for scheduling the tasks.</td>
- </tr>
- <tr>
- <td><a name="mapred.jobtracker.taskScheduler.maxRunningTasksPerJob">mapred.jobtracker.taskScheduler.maxRunningTasksPerJob</a></td><td></td><td>The maximum number of running tasks for a job before
- it gets preempted. No limits if undefined.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.map.max.attempts">mapred.map.max.attempts</a></td><td>4</td><td>Expert: The maximum number of attempts per map task.
- In other words, the framework will try to execute a map task this many
- times before giving up on it.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.reduce.max.attempts">mapred.reduce.max.attempts</a></td><td>4</td><td>Expert: The maximum number of attempts per reduce task.
- In other words, the framework will try to execute a reduce task this many
- times before giving up on it.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.reduce.parallel.copies">mapred.reduce.parallel.copies</a></td><td>5</td><td>The default number of parallel transfers run by reduce
- during the copy (shuffle) phase.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.reduce.copy.backoff">mapred.reduce.copy.backoff</a></td><td>300</td><td>The maximum amount of time (in seconds) a reducer spends on
- fetching one map output before declaring it as failed.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.task.timeout">mapred.task.timeout</a></td><td>600000</td><td>The number of milliseconds before a task will be
- terminated if it neither reads an input, writes an output, nor
- updates its status string.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.tasktracker.map.tasks.maximum">mapred.tasktracker.map.tasks.maximum</a></td><td>2</td><td>The maximum number of map tasks that will be run
- simultaneously by a task tracker.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.tasktracker.reduce.tasks.maximum">mapred.tasktracker.reduce.tasks.maximum</a></td><td>2</td><td>The maximum number of reduce tasks that will be run
- simultaneously by a task tracker.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.jobtracker.completeuserjobs.maximum">mapred.jobtracker.completeuserjobs.maximum</a></td><td>100</td><td>The maximum number of complete jobs per user to keep around
- before delegating them to the job history.</td>
- </tr>
- <tr>
- <td><a name="mapred.jobtracker.instrumentation">mapred.jobtracker.instrumentation</a></td><td>org.apache.hadoop.mapred.JobTrackerMetricsInst</td><td>Expert: The instrumentation class to associate with each JobTracker.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.child.java.opts">mapred.child.java.opts</a></td><td>-Xmx200m</td><td>Java opts for the task tracker child processes.
- The following symbol, if present, will be interpolated: @taskid@ is replaced
- by the current TaskID. Any other occurrences of '@' will go unchanged.
- For example, to enable verbose gc logging to a file named for the taskid in
- /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
- -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-
- The configuration variable mapred.child.ulimit can be used to control the
- maximum virtual memory of the child processes.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.child.ulimit">mapred.child.ulimit</a></td><td></td><td>The maximum virtual memory, in KB, of a process launched by the
- Map-Reduce framework. This can be used to control both the Mapper/Reducer
- tasks and applications using Hadoop Pipes, Hadoop Streaming etc.
- By default it is left unspecified to let cluster admins control it via
- limits.conf and other such relevant mechanisms.
-
- Note: mapred.child.ulimit must be greater than or equal to the -Xmx passed to
- the JVM, or else the VM might not start.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.child.tmp">mapred.child.tmp</a></td><td>./tmp</td><td> To set the value of tmp directory for map and reduce tasks.
- If the value is an absolute path, it is directly assigned. Otherwise, it is
- prepended with task's working directory. The java tasks are executed with
- option -Djava.io.tmpdir='the absolute path of the tmp dir'. Pipes and
- streaming are set with environment variable,
- TMPDIR='the absolute path of the tmp dir'
- </td>
- </tr>
- <tr>
- <td><a name="mapred.inmem.merge.threshold">mapred.inmem.merge.threshold</a></td><td>1000</td><td>The threshold, in terms of the number of files
- for the in-memory merge process. When we accumulate threshold number of files
- we initiate the in-memory merge and spill to disk. A value of 0 or less than
- 0 indicates we want to DON'T have any threshold and instead depend only on
- the ramfs's memory consumption to trigger the merge.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.job.shuffle.merge.percent">mapred.job.shuffle.merge.percent</a></td><td>0.66</td><td>The usage threshold at which an in-memory merge will be
- initiated, expressed as a percentage of the total memory allocated to
- storing in-memory map outputs, as defined by
- mapred.job.shuffle.input.buffer.percent.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.job.shuffle.input.buffer.percent">mapred.job.shuffle.input.buffer.percent</a></td><td>0.70</td><td>The percentage of memory to be allocated from the maximum heap
- size to storing map outputs during the shuffle.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.job.reduce.input.buffer.percent">mapred.job.reduce.input.buffer.percent</a></td><td>0.0</td><td>The percentage of memory- relative to the maximum heap size- to
- retain map outputs during the reduce. When the shuffle is concluded, any
- remaining map outputs in memory must consume less than this threshold before
- the reduce can begin.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.map.tasks.speculative.execution">mapred.map.tasks.speculative.execution</a></td><td>true</td><td>If true, then multiple instances of some map tasks
- may be executed in parallel.</td>
- </tr>
- <tr>
- <td><a name="mapred.reduce.tasks.speculative.execution">mapred.reduce.tasks.speculative.execution</a></td><td>true</td><td>If true, then multiple instances of some reduce tasks
- may be executed in parallel.</td>
- </tr>
- <tr>
- <td><a name="mapred.job.reuse.jvm.num.tasks">mapred.job.reuse.jvm.num.tasks</a></td><td>1</td><td>How many tasks to run per jvm. If set to -1, there is
- no limit.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.min.split.size">mapred.min.split.size</a></td><td>0</td><td>The minimum size chunk that map input should be split
- into. Note that some file formats may have minimum split sizes that
- take priority over this setting.</td>
- </tr>
- <tr>
- <td><a name="mapred.jobtracker.maxtasks.per.job">mapred.jobtracker.maxtasks.per.job</a></td><td>-1</td><td>The maximum number of tasks for a single job.
- A value of -1 indicates that there is no maximum. </td>
- </tr>
- <tr>
- <td><a name="mapred.submit.replication">mapred.submit.replication</a></td><td>10</td><td>The replication level for submitted job files. This
- should be around the square root of the number of nodes.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.tasktracker.dns.interface">mapred.tasktracker.dns.interface</a></td><td>default</td><td>The name of the Network Interface from which a task
- tracker should report its IP address.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.tasktracker.dns.nameserver">mapred.tasktracker.dns.nameserver</a></td><td>default</td><td>The host name or IP address of the name server (DNS)
- which a TaskTracker should use to determine the host name used by
- the JobTracker for communication and display purposes.
- </td>
- </tr>
- <tr>
- <td><a name="tasktracker.http.threads">tasktracker.http.threads</a></td><td>40</td><td>The number of worker threads that for the http server. This is
- used for map output fetching
- </td>
- </tr>
- <tr>
- <td><a name="mapred.task.tracker.http.address">mapred.task.tracker.http.address</a></td><td>0.0.0.0:50060</td><td>
- The task tracker http server address and port.
- If the port is 0 then the server will start on a free port.
- </td>
- </tr>
- <tr>
- <td><a name="keep.failed.task.files">keep.failed.task.files</a></td><td>false</td><td>Should the files for failed tasks be kept. This should only be
- used on jobs that are failing, because the storage is never
- reclaimed. It also prevents the map outputs from being erased
- from the reduce directory as they are consumed.</td>
- </tr>
- <tr>
- <td><a name="mapred.output.compress">mapred.output.compress</a></td><td>false</td><td>Should the job outputs be compressed?
- </td>
- </tr>
- <tr>
- <td><a name="mapred.output.compression.type">mapred.output.compression.type</a></td><td>RECORD</td><td>If the job outputs are to compressed as SequenceFiles, how should
- they be compressed? Should be one of NONE, RECORD or BLOCK.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.output.compression.codec">mapred.output.compression.codec</a></td><td>org.apache.hadoop.io.compress.DefaultCodec</td><td>If the job outputs are compressed, how should they be compressed?
- </td>
- </tr>
- <tr>
- <td><a name="mapred.compress.map.output">mapred.compress.map.output</a></td><td>false</td><td>Should the outputs of the maps be compressed before being
- sent across the network. Uses SequenceFile compression.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.map.output.compression.codec">mapred.map.output.compression.codec</a></td><td>org.apache.hadoop.io.compress.DefaultCodec</td><td>If the map outputs are compressed, how should they be
- compressed?
- </td>
- </tr>
- <tr>
- <td><a name="io.seqfile.compress.blocksize">io.seqfile.compress.blocksize</a></td><td>1000000</td><td>The minimum block size for compression in block compressed
- SequenceFiles.
- </td>
- </tr>
- <tr>
- <td><a name="io.seqfile.lazydecompress">io.seqfile.lazydecompress</a></td><td>true</td><td>Should values of block-compressed SequenceFiles be decompressed
- only when necessary.
- </td>
- </tr>
- <tr>
- <td><a name="io.seqfile.sorter.recordlimit">io.seqfile.sorter.recordlimit</a></td><td>1000000</td><td>The limit on number of records to be kept in memory in a spill
- in SequenceFiles.Sorter
- </td>
- </tr>
- <tr>
- <td><a name="map.sort.class">map.sort.class</a></td><td>org.apache.hadoop.util.QuickSort</td><td>The default sort class for sorting keys.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.userlog.limit.kb">mapred.userlog.limit.kb</a></td><td>0</td><td>The maximum size of user-logs of each task in KB. 0 disables the cap.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.userlog.retain.hours">mapred.userlog.retain.hours</a></td><td>24</td><td>The maximum time, in hours, for which the user-logs are to be
- retained.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.hosts">mapred.hosts</a></td><td></td><td>Names a file that contains the list of nodes that may
- connect to the jobtracker. If the value is empty, all hosts are
- permitted.</td>
- </tr>
- <tr>
- <td><a name="mapred.hosts.exclude">mapred.hosts.exclude</a></td><td></td><td>Names a file that contains the list of hosts that
- should be excluded by the jobtracker. If the value is empty, no
- hosts are excluded.</td>
- </tr>
- <tr>
- <td><a name="mapred.max.tracker.failures">mapred.max.tracker.failures</a></td><td>4</td><td>The number of task-failures on a tasktracker of a given job
- after which new tasks of that job aren't assigned to it.
- </td>
- </tr>
- <tr>
- <td><a name="jobclient.output.filter">jobclient.output.filter</a></td><td>FAILED</td><td>The filter for controlling the output of the task's userlogs sent
- to the console of the JobClient.
- The permissible options are: NONE, KILLED, FAILED, SUCCEEDED and
- ALL.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.job.tracker.persist.jobstatus.active">mapred.job.tracker.persist.jobstatus.active</a></td><td>false</td><td>Indicates if persistency of job status information is
- active or not.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.job.tracker.persist.jobstatus.hours">mapred.job.tracker.persist.jobstatus.hours</a></td><td>0</td><td>The number of hours job status information is persisted in DFS.
- The job status information will be available after it drops off the memory
- queue and between jobtracker restarts. With a zero value the job status
- information is not persisted at all in DFS.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.job.tracker.persist.jobstatus.dir">mapred.job.tracker.persist.jobstatus.dir</a></td><td>/jobtracker/jobsInfo</td><td>The directory where the job status information is persisted
- in a file system to be available after it drops off the memory queue and
- between jobtracker restarts.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.task.profile">mapred.task.profile</a></td><td>false</td><td>To set whether the system should collect profiler
- information for some of the tasks in this job? The information is stored
- in the the user log directory. The value is "true" if task profiling
- is enabled.</td>
- </tr>
- <tr>
- <td><a name="mapred.task.profile.maps">mapred.task.profile.maps</a></td><td>0-2</td><td> To set the ranges of map tasks to profile.
- mapred.task.profile has to be set to true for the value to be accounted.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.task.profile.reduces">mapred.task.profile.reduces</a></td><td>0-2</td><td> To set the ranges of reduce tasks to profile.
- mapred.task.profile has to be set to true for the value to be accounted.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.line.input.format.linespermap">mapred.line.input.format.linespermap</a></td><td>1</td><td> Number of lines per split in NLineInputFormat.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.skip.attempts.to.start.skipping">mapred.skip.attempts.to.start.skipping</a></td><td>2</td><td> The number of Task attempts AFTER which skip mode
- will be kicked off. When skip mode is kicked off, the
- tasks reports the range of records which it will process
- next, to the TaskTracker. So that on failures, TT knows which
- ones are possibly the bad records. On further executions,
- those are skipped.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.skip.map.auto.incr.proc.count">mapred.skip.map.auto.incr.proc.count</a></td><td>true</td><td> The flag which if set to true,
- SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS is incremented
- by MapRunner after invoking the map function. This value must be set to
- false for applications which process the records asynchronously
- or buffer the input records. For example streaming.
- In such cases applications should increment this counter on their own.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.skip.reduce.auto.incr.proc.count">mapred.skip.reduce.auto.incr.proc.count</a></td><td>true</td><td> The flag which if set to true,
- SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS is incremented
- by framework after invoking the reduce function. This value must be set to
- false for applications which process the records asynchronously
- or buffer the input records. For example streaming.
- In such cases applications should increment this counter on their own.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.skip.out.dir">mapred.skip.out.dir</a></td><td></td><td> If no value is specified here, the skipped records are
- written to the output directory at _logs/skip.
- User can stop writing skipped records by giving the value "none".
- </td>
- </tr>
- <tr>
- <td><a name="mapred.skip.map.max.skip.records">mapred.skip.map.max.skip.records</a></td><td>0</td><td> The number of acceptable skip records surrounding the bad
- record PER bad record in mapper. The number includes the bad record as well.
- To turn the feature of detection/skipping of bad records off, set the
- value to 0.
- The framework tries to narrow down the skipped range by retrying
- until this threshold is met OR all attempts get exhausted for this task.
- Set the value to Long.MAX_VALUE to indicate that the framework need not try to
- narrow down. Whatever records (depending on the application) get skipped are
- acceptable.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.skip.reduce.max.skip.groups">mapred.skip.reduce.max.skip.groups</a></td><td>0</td><td> The number of acceptable skip groups surrounding the bad
- group PER bad group in reducer. The number includes the bad group as well.
- To turn the feature of detection/skipping of bad groups off, set the
- value to 0.
- The framework tries to narrow down the skipped range by retrying
- until this threshold is met OR all attempts get exhausted for this task.
- Set the value to Long.MAX_VALUE to indicate that the framework need not try to
- narrow down. Whatever groups (depending on the application) get skipped are
- acceptable.
- </td>
- </tr>
- <tr>
- <td><a name="ipc.client.idlethreshold">ipc.client.idlethreshold</a></td><td>4000</td><td>Defines the threshold number of connections after which
- connections will be inspected for idleness.
- </td>
- </tr>
- <tr>
- <td><a name="ipc.client.kill.max">ipc.client.kill.max</a></td><td>10</td><td>Defines the maximum number of clients to disconnect in one go.
- </td>
- </tr>
- <tr>
- <td><a name="ipc.client.connection.maxidletime">ipc.client.connection.maxidletime</a></td><td>10000</td><td>The maximum time in msec after which a client will bring down the
- connection to the server.
- </td>
- </tr>
- <tr>
- <td><a name="ipc.client.connect.max.retries">ipc.client.connect.max.retries</a></td><td>10</td><td>Indicates the number of retries a client will make to establish
- a server connection.
- </td>
- </tr>
- <tr>
- <td><a name="ipc.server.listen.queue.size">ipc.server.listen.queue.size</a></td><td>128</td><td>Indicates the length of the listen queue for servers accepting
- client connections.
- </td>
- </tr>
- <tr>
- <td><a name="ipc.server.tcpnodelay">ipc.server.tcpnodelay</a></td><td>false</td><td>Turn on/off Nagle's algorithm for the TCP socket connection on
- the server. Setting to true disables the algorithm and may decrease latency
- at the cost of more, smaller packets.
- </td>
- </tr>
- <tr>
- <td><a name="ipc.client.tcpnodelay">ipc.client.tcpnodelay</a></td><td>false</td><td>Turn on/off Nagle's algorithm for the TCP socket connection on
- the client. Setting to true disables the algorithm and may decrease latency
- at the cost of more, smaller packets.
- </td>
- </tr>
- <tr>
- <td><a name="job.end.retry.attempts">job.end.retry.attempts</a></td><td>0</td><td>Indicates how many times hadoop should attempt to contact the
- notification URL </td>
- </tr>
- <tr>
- <td><a name="job.end.retry.interval">job.end.retry.interval</a></td><td>30000</td><td>Indicates time in milliseconds between notification URL retry
- calls</td>
- </tr>
- <tr>
- <td><a name="webinterface.private.actions">webinterface.private.actions</a></td><td>false</td><td> If set to true, the web interfaces of JT and NN may contain
- actions, such as kill job, delete file, etc., that should
- not be exposed to public. Enable this option if the interfaces
- are only reachable by those who have the right authorization.
- </td>
- </tr>
- <tr>
- <td><a name="hadoop.rpc.socket.factory.class.default">hadoop.rpc.socket.factory.class.default</a></td><td>org.apache.hadoop.net.StandardSocketFactory</td><td> Default SocketFactory to use. This parameter is expected to be
- formatted as "package.FactoryClassName".
- </td>
- </tr>
- <tr>
- <td><a name="hadoop.rpc.socket.factory.class.ClientProtocol">hadoop.rpc.socket.factory.class.ClientProtocol</a></td><td></td><td> SocketFactory to use to connect to a DFS. If null or empty, use
- hadoop.rpc.socket.factory.class.default. This socket factory is also used by
- DFSClient to create sockets to DataNodes.
- </td>
- </tr>
- <tr>
- <td><a name="hadoop.rpc.socket.factory.class.JobSubmissionProtocol">hadoop.rpc.socket.factory.class.JobSubmissionProtocol</a></td><td></td><td> SocketFactory to use to connect to a Map/Reduce master
- (JobTracker). If null or empty, then use hadoop.rpc.socket.factory.class.default.
- </td>
- </tr>
- <tr>
- <td><a name="hadoop.socks.server">hadoop.socks.server</a></td><td></td><td> Address (host:port) of the SOCKS server to be used by the
- SocksSocketFactory.
- </td>
- </tr>
- <tr>
- <td><a name="topology.node.switch.mapping.impl">topology.node.switch.mapping.impl</a></td><td>org.apache.hadoop.net.ScriptBasedMapping</td><td> The default implementation of the DNSToSwitchMapping. It
- invokes a script specified in topology.script.file.name to resolve
- node names. If the value for topology.script.file.name is not set, the
- default value of DEFAULT_RACK is returned for all node names.
- </td>
- </tr>
- <tr>
- <td><a name="topology.script.file.name">topology.script.file.name</a></td><td></td><td> The script name that should be invoked to resolve DNS names to
- NetworkTopology names. Example: the script would take host.foo.bar as an
- argument, and return /rack1 as the output.
- </td>
- </tr>
- <tr>
- <td><a name="topology.script.number.args">topology.script.number.args</a></td><td>100</td><td> The max number of args that the script configured with
- topology.script.file.name should be run with. Each arg is an
- IP address.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.task.cache.levels">mapred.task.cache.levels</a></td><td>2</td><td> This is the max level of the task cache. For example, if
- the level is 2, the tasks cached are at the host level and at the rack
- level.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.tasktracker.tasks.maxmemory">mapred.tasktracker.tasks.maxmemory</a></td><td>-1</td><td> The maximum amount of virtual memory in kilobytes all tasks
- running on a tasktracker, including sub-processes they launch, can use.
- This value is used to compute the amount of free memory available for
- tasks. Any task scheduled on this tasktracker is guaranteed and constrained
- to use a share of this amount. Any task exceeding its share will be
- killed. If set to -1, this functionality is disabled, and
- mapred.task.maxmemory is ignored. Further, it will be enabled only on the
- systems where org.apache.hadoop.util.ProcfsBasedProcessTree is available,
- i.e., at present only on Linux.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.task.maxmemory">mapred.task.maxmemory</a></td><td>-1</td><td> The maximum amount of memory in kilobytes any task of a job
- will use. A task of this job will be scheduled on a tasktracker only if
- the amount of free memory on the tasktracker is greater than or
- equal to this value. If set to -1, tasks are assured a memory limit on
- the tasktracker equal to
- mapred.tasktracker.tasks.maxmemory/number of slots. If the value of
- mapred.tasktracker.tasks.maxmemory is set to -1, this value is ignored.
-
- Note: If mapred.child.java.opts is specified with an Xmx value, or if
- mapred.child.ulimit is specified, then the value of mapred.task.maxmemory
- must be set to a higher value than these. If not, the task might be
- killed even though these limits are not reached.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.queue.names">mapred.queue.names</a></td><td>default</td><td> Comma separated list of queues configured for this jobtracker.
- Jobs are added to queues and schedulers can configure different
- scheduling properties for the various queues. To configure a property
- for a queue, the name of the queue must match the name specified in this
- value. Queue properties that are common to all schedulers are configured
- here with the naming convention mapred.queue.$QUEUE-NAME.$PROPERTY-NAME,
- for example, mapred.queue.default.acl-submit-job.
- The number of queues configured in this parameter could depend on the
- type of scheduler being used, as specified in
- mapred.jobtracker.taskScheduler. For example, the JobQueueTaskScheduler
- supports only a single queue, which is the default configured here.
- Before adding more queues, ensure that the scheduler you've configured
- supports multiple queues.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.acls.enabled">mapred.acls.enabled</a></td><td>false</td><td> Specifies whether ACLs are enabled, and should be checked
- for various operations.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.queue.default.acl-submit-job">mapred.queue.default.acl-submit-job</a></td><td>*</td><td> Comma separated list of user and group names that are allowed
- to submit jobs to the 'default' queue. The user list and the group list
- are separated by a blank. For e.g. alice,bob group1,group2.
- If set to the special value '*', it means all users are allowed to
- submit jobs.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.queue.default.acl-administer-jobs">mapred.queue.default.acl-administer-jobs</a></td><td>*</td><td> Comma separated list of user and group names that are allowed
- to delete jobs or modify job's priority for jobs not owned by the current
- user in the 'default' queue. The user list and the group list
- are separated by a blank. For e.g. alice,bob group1,group2.
- If set to the special value '*', it means all users are allowed to do
- this operation.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.job.queue.name">mapred.job.queue.name</a></td><td>default</td><td> Queue to which a job is submitted. This must match one of the
- queues defined in mapred.queue.names for the system. Also, the ACL setup
- for the queue must allow the current user to submit a job to the queue.
- Before specifying a queue, ensure that the system is configured with
- the queue, and access is allowed for submitting jobs to the queue.
- </td>
- </tr>
- <tr>
- <td><a name="mapred.tasktracker.indexcache.mb">mapred.tasktracker.indexcache.mb</a></td><td>10</td><td> The maximum memory that a task tracker allows for the
- index cache that is used when serving map outputs to reducers.
- </td>
- </tr>
- </table>
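- <p>The values in the table above are the shipped defaults (hadoop-default.xml);
- per-site overrides belong in hadoop-site.xml on the configuration classpath.
- As a minimal, illustrative sketch of such an override file (the host names,
- ports, and replication value below are placeholders, not recommendations),
- a small-cluster hadoop-site.xml might look like this:</p>
- <pre>
- &lt;?xml version="1.0"?&gt;
- &lt;configuration&gt;
-   &lt;!-- Point clients at a real HDFS namenode instead of file:/// --&gt;
-   &lt;property&gt;
-     &lt;name&gt;fs.default.name&lt;/name&gt;
-     &lt;value&gt;hdfs://namenode.example.com:9000&lt;/value&gt;
-   &lt;/property&gt;
-   &lt;!-- Run jobs on a jobtracker instead of in-process ("local") --&gt;
-   &lt;property&gt;
-     &lt;name&gt;mapred.job.tracker&lt;/name&gt;
-     &lt;value&gt;jobtracker.example.com:9001&lt;/value&gt;
-   &lt;/property&gt;
-   &lt;!-- Lower the default block replication of 3 for a small cluster --&gt;
-   &lt;property&gt;
-     &lt;name&gt;dfs.replication&lt;/name&gt;
-     &lt;value&gt;2&lt;/value&gt;
-   &lt;/property&gt;
- &lt;/configuration&gt;
- </pre>
- <p>A property in hadoop-site.xml can also carry &lt;final&gt;true&lt;/final&gt; to
- prevent job configurations from overriding it.</p>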
- </body>
- </html>