<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Do not modify this file directly. Instead, copy entries that you -->
<!-- wish to modify from this file into hadoop-site.xml and change them -->
<!-- there. If hadoop-site.xml does not already exist, create it. -->

<configuration>

<!-- global properties -->

<property>
  <name>hadoop.tmp.dir</name>
  <value>/tmp/hadoop-${user.name}</value>
  <description>A base for other temporary directories.</description>
</property>

<property>
  <name>hadoop.native.lib</name>
  <value>true</value>
  <description>Should native hadoop libraries, if present, be used?</description>
</property>
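
<!-- Example (hypothetical path; copy into hadoop-site.xml rather than
     editing this file): to keep Hadoop data across reboots, point
     hadoop.tmp.dir at a persistent disk instead of /tmp. The path below
     is an assumed layout, not a recommendation.
<property>
  <name>hadoop.tmp.dir</name>
  <value>/var/lib/hadoop/tmp-${user.name}</value>
</property>
-->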

<!-- logging properties -->

<property>
  <name>hadoop.logfile.size</name>
  <value>10000000</value>
  <description>The max size of each log file.</description>
</property>

<property>
  <name>hadoop.logfile.count</name>
  <value>10</value>
  <description>The max number of log files.</description>
</property>

<property>
  <name>dfs.namenode.logging.level</name>
  <value>info</value>
  <description>The logging level for dfs namenode. Other values are
  "dir" (trace namespace mutations), "block" (trace block under/over
  replications and block creations/deletions), or "all".</description>
</property>

<!-- i/o properties -->

<property>
  <name>io.sort.factor</name>
  <value>10</value>
  <description>The number of streams to merge at once while sorting
  files. This determines the number of open file handles.</description>
</property>

<property>
  <name>io.sort.mb</name>
  <value>100</value>
  <description>The total amount of buffer memory to use while sorting
  files, in megabytes. By default, gives each merge stream 1MB, which
  should minimize seeks.</description>
</property>

<property>
  <name>io.file.buffer.size</name>
  <value>4096</value>
  <description>The size of the buffer for use in sequence files.
  The size of this buffer should probably be a multiple of the hardware
  page size (4096 on Intel x86), and it determines how much data is
  buffered during read and write operations.</description>
</property>
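
<!-- Example sketch (hypothetical values): jobs that sort large map outputs
     sometimes raise io.sort.mb and io.sort.factor together in
     hadoop-site.xml; more buffer memory means fewer spills, and a larger
     merge factor means fewer merge passes at the cost of more open file
     handles. Tune against your task heap size and file-handle ulimit.
<property>
  <name>io.sort.mb</name>
  <value>200</value>
</property>
<property>
  <name>io.sort.factor</name>
  <value>20</value>
</property>
-->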

<property>
  <name>io.bytes.per.checksum</name>
  <value>512</value>
  <description>The number of bytes per checksum. Must not be larger than
  io.file.buffer.size.</description>
</property>

<property>
  <name>io.skip.checksum.errors</name>
  <value>false</value>
  <description>If true, when a checksum error is encountered while
  reading a sequence file, entries are skipped, instead of throwing an
  exception.</description>
</property>

<property>
  <name>io.map.index.skip</name>
  <value>0</value>
  <description>Number of index entries to skip between each entry.
  Zero by default. Setting this to values larger than zero can
  facilitate opening large map files using less memory.</description>
</property>

<property>
  <name>io.compression.codecs</name>
  <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec</value>
  <description>A list of the compression codec classes that can be used
  for compression/decompression.</description>
</property>
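
<!-- Example (hypothetical codec class): additional codecs are registered
     by appending their fully qualified class names to this comma-separated
     list in hadoop-site.xml. com.example.MyCodec below is a placeholder,
     not a real class.
<property>
  <name>io.compression.codecs</name>
  <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,com.example.MyCodec</value>
</property>
-->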

<!-- file system properties -->

<property>
  <name>fs.default.name</name>
  <value>file:///</value>
  <description>The name of the default file system. A URI whose
  scheme and authority determine the FileSystem implementation. The
  uri's scheme determines the config property (fs.SCHEME.impl) naming
  the FileSystem implementation class. The uri's authority is used to
  determine the host, port, etc. for a filesystem.</description>
</property>

<property>
  <name>fs.trash.root</name>
  <value>${hadoop.tmp.dir}/Trash</value>
  <description>The trash directory, used by FsShell's 'rm' command.
  </description>
</property>

<property>
  <name>fs.trash.interval</name>
  <value>0</value>
  <description>Number of minutes between trash checkpoints.
  If zero, the trash feature is disabled.
  </description>
</property>
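
<!-- Example (hypothetical host and port): a typical cluster points
     fs.default.name at the namenode and enables the trash feature so
     'rm' moves files into fs.trash.root instead of deleting them outright.
<property>
  <name>fs.default.name</name>
  <value>hdfs://namenode.example.com:9000</value>
</property>
<property>
  <name>fs.trash.interval</name>
  <value>60</value>
</property>
-->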

<property>
  <name>fs.file.impl</name>
  <value>org.apache.hadoop.fs.LocalFileSystem</value>
  <description>The FileSystem for file: uris.</description>
</property>

<property>
  <name>fs.hdfs.impl</name>
  <value>org.apache.hadoop.dfs.DistributedFileSystem</value>
  <description>The FileSystem for hdfs: uris.</description>
</property>

<property>
  <name>fs.s3.impl</name>
  <value>org.apache.hadoop.fs.s3.S3FileSystem</value>
  <description>The FileSystem for s3: uris.</description>
</property>

<property>
  <name>fs.ramfs.impl</name>
  <value>org.apache.hadoop.fs.InMemoryFileSystem</value>
  <description>The FileSystem for ramfs: uris.</description>
</property>

<property>
  <name>fs.inmemory.size.mb</name>
  <value>75</value>
  <description>The size of the in-memory filesystem instance in MB.</description>
</property>

<property>
  <name>fs.checkpoint.dir</name>
  <value>${hadoop.tmp.dir}/dfs/namesecondary</value>
  <description>Determines where on the local filesystem the DFS secondary
  name node should store the temporary images and edits to merge.
  </description>
</property>

<property>
  <name>fs.checkpoint.period</name>
  <value>3600</value>
  <description>The number of seconds between two periodic checkpoints.
  </description>
</property>

<property>
  <name>fs.checkpoint.size</name>
  <value>67108864</value>
  <description>The size of the current edit log (in bytes) that triggers
  a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
  </description>
</property>

<property>
  <name>dfs.secondary.info.port</name>
  <value>50090</value>
  <description>The base number for the Secondary namenode info port.
  </description>
</property>

<property>
  <name>dfs.secondary.info.bindAddress</name>
  <value>0.0.0.0</value>
  <description>
    The address where the secondary namenode web UI will listen.
  </description>
</property>

<property>
  <name>dfs.datanode.bindAddress</name>
  <value>0.0.0.0</value>
  <description>
    The address where the datanode will listen.
  </description>
</property>

<property>
  <name>dfs.datanode.port</name>
  <value>50010</value>
  <description>The port number that the dfs datanode server uses as a
  starting point to look for a free port to listen on.
  </description>
</property>

<property>
  <name>dfs.info.bindAddress</name>
  <value>0.0.0.0</value>
  <description>
    The address where the dfs namenode web UI will listen.
  </description>
</property>

<property>
  <name>dfs.info.port</name>
  <value>50070</value>
  <description>The base port number for the dfs namenode web UI.
  </description>
</property>

<property>
  <name>dfs.datanode.dns.interface</name>
  <value>default</value>
  <description>The name of the Network Interface from which a data node
  should report its IP address.
  </description>
</property>

<property>
  <name>dfs.datanode.dns.nameserver</name>
  <value>default</value>
  <description>The host name or IP address of the name server (DNS)
  which a DataNode should use to determine the host name used by the
  NameNode for communication and display purposes.
  </description>
</property>

<property>
  <name>dfs.replication.considerLoad</name>
  <value>true</value>
  <description>Decides whether chooseTarget considers the target's load.
  </description>
</property>

<property>
  <name>dfs.default.chunk.view.size</name>
  <value>32768</value>
  <description>The number of bytes of a file to view in the browser.
  </description>
</property>

<property>
  <name>dfs.datanode.du.reserved</name>
  <value>0</value>
  <description>Reserved space in bytes per volume. Always leave this much
  space free for non dfs use.
  </description>
</property>

<property>
  <name>dfs.datanode.du.pct</name>
  <value>0.98f</value>
  <description>When calculating remaining space, only use this percentage
  of the real available space.
  </description>
</property>

<property>
  <name>dfs.name.dir</name>
  <value>${hadoop.tmp.dir}/dfs/name</value>
  <description>Determines where on the local filesystem the DFS name node
  should store the name table. If this is a comma-delimited list
  of directories then the name table is replicated in all of the
  directories, for redundancy.</description>
</property>

<property>
  <name>dfs.client.buffer.dir</name>
  <value>${hadoop.tmp.dir}/dfs/tmp</value>
  <description>Determines where on the local filesystem a DFS client
  should store its blocks before it sends them to the datanode.
  </description>
</property>

<property>
  <name>dfs.data.dir</name>
  <value>${hadoop.tmp.dir}/dfs/data</value>
  <description>Determines where on the local filesystem a DFS data node
  should store its blocks. If this is a comma-delimited
  list of directories, then data will be stored in all named
  directories, typically on different devices.
  Directories that do not exist are ignored.
  </description>
</property>
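
<!-- Example (hypothetical mount points): redundant name-table copies and
     multi-disk block storage are both expressed as comma-delimited lists,
     per the descriptions above; the paths below are assumptions about the
     local disk layout.
<property>
  <name>dfs.name.dir</name>
  <value>/disk1/dfs/name,/disk2/dfs/name</value>
</property>
<property>
  <name>dfs.data.dir</name>
  <value>/disk1/dfs/data,/disk2/dfs/data</value>
</property>
-->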

<property>
  <name>dfs.replication</name>
  <value>3</value>
  <description>Default block replication.
  The actual number of replications can be specified when the file is
  created. The default is used if replication is not specified at create
  time.
  </description>
</property>

<property>
  <name>dfs.replication.max</name>
  <value>512</value>
  <description>Maximal block replication.
  </description>
</property>

<property>
  <name>dfs.replication.min</name>
  <value>1</value>
  <description>Minimal block replication.
  </description>
</property>

<property>
  <name>dfs.block.size</name>
  <value>67108864</value>
  <description>The default block size for new files.</description>
</property>

<property>
  <name>dfs.df.interval</name>
  <value>60000</value>
  <description>Disk usage statistics refresh interval in msec.</description>
</property>
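
<!-- Example (hypothetical values): replication and block size are commonly
     overridden together in hadoop-site.xml; 2 replicas and 128MB blocks
     (134217728 bytes) are illustrative choices, not recommendations.
<property>
  <name>dfs.replication</name>
  <value>2</value>
</property>
<property>
  <name>dfs.block.size</name>
  <value>134217728</value>
</property>
-->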

<property>
  <name>dfs.client.block.write.retries</name>
  <value>3</value>
  <description>The number of retries for writing blocks to the data nodes,
  before we signal failure to the application.
  </description>
</property>

<property>
  <name>dfs.blockreport.intervalMsec</name>
  <value>3600000</value>
  <description>Determines block reporting interval in milliseconds.</description>
</property>

<property>
  <name>dfs.heartbeat.interval</name>
  <value>3</value>
  <description>Determines datanode heartbeat interval in seconds.</description>
</property>

<property>
  <name>dfs.namenode.handler.count</name>
  <value>10</value>
  <description>The number of server threads for the namenode.</description>
</property>

<property>
  <name>dfs.safemode.threshold.pct</name>
  <value>0.999f</value>
  <description>
    Specifies the percentage of blocks that should satisfy
    the minimal replication requirement defined by dfs.replication.min.
    Values less than or equal to 0 mean not to start in safe mode.
    Values greater than 1 will make safe mode permanent.
  </description>
</property>

<property>
  <name>dfs.safemode.extension</name>
  <value>30000</value>
  <description>
    Determines extension of safe mode in milliseconds
    after the threshold level is reached.
  </description>
</property>

<property>
  <name>dfs.network.script</name>
  <value></value>
  <description>
    Specifies a script name that prints the network location path
    of the current machine.
  </description>
</property>

<property>
  <name>dfs.hosts</name>
  <value></value>
  <description>Names a file that contains a list of hosts that are
  permitted to connect to the namenode. The full pathname of the file
  must be specified. If the value is empty, all hosts are
  permitted.</description>
</property>

<property>
  <name>dfs.hosts.exclude</name>
  <value></value>
  <description>Names a file that contains a list of hosts that are
  not permitted to connect to the namenode. The full pathname of the
  file must be specified. If the value is empty, no hosts are
  excluded.</description>
</property>
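
<!-- Example (hypothetical path): dfs.hosts takes the full pathname of a
     plain text file listing permitted datanode hostnames, one per line;
     dfs.hosts.exclude works the same way for shutting hosts out.
<property>
  <name>dfs.hosts</name>
  <value>/etc/hadoop/dfs.include</value>
</property>
-->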

<property>
  <name>fs.s3.block.size</name>
  <value>67108864</value>
  <description>Block size to use when writing files to S3.</description>
</property>

<property>
  <name>fs.s3.buffer.dir</name>
  <value>${hadoop.tmp.dir}/s3</value>
  <description>Determines where on the local filesystem the S3 filesystem
  should store its blocks before it sends them to S3
  or after it retrieves them from S3.
  </description>
</property>

<property>
  <name>fs.s3.maxRetries</name>
  <value>4</value>
  <description>The maximum number of retries for reading or writing blocks
  to S3, before we signal failure to the application.
  </description>
</property>

<property>
  <name>fs.s3.sleepTimeSeconds</name>
  <value>10</value>
  <description>The number of seconds to sleep between each S3 retry.
  </description>
</property>

<!-- map/reduce properties -->

<property>
  <name>mapred.job.tracker</name>
  <value>local</value>
  <description>The host and port that the MapReduce job tracker runs
  at. If "local", then jobs are run in-process as a single map
  and reduce task.
  </description>
</property>
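
<!-- Example (hypothetical host and port): on a real cluster the job
     tracker address is set to host:port instead of "local".
<property>
  <name>mapred.job.tracker</name>
  <value>jobtracker.example.com:9001</value>
</property>
-->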

<property>
  <name>mapred.job.tracker.info.bindAddress</name>
  <value>0.0.0.0</value>
  <description>
    The address to which the job tracker info webserver will bind.
  </description>
</property>

<property>
  <name>mapred.job.tracker.info.port</name>
  <value>50030</value>
  <description>The port that the MapReduce job tracker info webserver runs
  at.
  </description>
</property>

<property>
  <name>mapred.task.tracker.report.bindAddress</name>
  <value>0.0.0.0</value>
  <description>
    The address to which the task tracker report server will bind.
  </description>
</property>

<property>
  <name>mapred.task.tracker.report.port</name>
  <value>50050</value>
  <description>The port number that the MapReduce task tracker report
  server uses as a starting point to look for a free port to listen on.
  </description>
</property>

<property>
  <name>mapred.local.dir</name>
  <value>${hadoop.tmp.dir}/mapred/local</value>
  <description>The local directory where MapReduce stores intermediate
  data files. May be a comma-separated list of
  directories on different devices in order to spread disk i/o.
  Directories that do not exist are ignored.
  </description>
</property>
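
<!-- Example (hypothetical mount points): spreading mapred.local.dir across
     devices lets intermediate-data i/o run in parallel across disks.
<property>
  <name>mapred.local.dir</name>
  <value>/disk1/mapred/local,/disk2/mapred/local</value>
</property>
-->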

<property>
  <name>local.cache.size</name>
  <value>10737418240</value>
  <description>The limit on the size of cache you want to keep, set by
  default to 10GB. This will act as a soft limit on the cache directory
  for out of band data.
  </description>
</property>

<property>
  <name>mapred.system.dir</name>
  <value>${hadoop.tmp.dir}/mapred/system</value>
  <description>The shared directory where MapReduce stores control files.
  </description>
</property>

<property>
  <name>mapred.temp.dir</name>
  <value>${hadoop.tmp.dir}/mapred/temp</value>
  <description>A shared directory for temporary files.
  </description>
</property>

<property>
  <name>mapred.local.dir.minspacestart</name>
  <value>0</value>
  <description>If the space in mapred.local.dir drops under this,
  do not ask for more tasks.
  Value in bytes.
  </description>
</property>

<property>
  <name>mapred.local.dir.minspacekill</name>
  <value>0</value>
  <description>If the space in mapred.local.dir drops under this,
  do not ask for more tasks until all the current ones have finished and
  cleaned up. Also, to save the rest of the tasks we have running,
  kill one of them, to clean up some space. Start with the reduce tasks,
  then go with the ones that have finished the least.
  Value in bytes.
  </description>
</property>

<property>
  <name>mapred.tasktracker.expiry.interval</name>
  <value>600000</value>
  <description>Expert: The time-interval, in milliseconds, after which
  a tasktracker is declared 'lost' if it doesn't send heartbeats.
  </description>
</property>

<property>
  <name>mapred.map.tasks</name>
  <value>2</value>
  <description>The default number of map tasks per job. Typically set
  to a prime several times greater than the number of available hosts.
  Ignored when mapred.job.tracker is "local".
  </description>
</property>

<property>
  <name>mapred.reduce.tasks</name>
  <value>1</value>
  <description>The default number of reduce tasks per job. Typically set
  to a prime close to the number of available hosts. Ignored when
  mapred.job.tracker is "local".
  </description>
</property>
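
<!-- Example sketch (assumed 10-host cluster): following the guidance
     above, set map tasks to a prime several times the host count and
     reduce tasks to a prime close to it; 53 and 11 are illustrative
     primes for 10 hosts, not tuned recommendations.
<property>
  <name>mapred.map.tasks</name>
  <value>53</value>
</property>
<property>
  <name>mapred.reduce.tasks</name>
  <value>11</value>
</property>
-->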

<property>
  <name>mapred.map.max.attempts</name>
  <value>4</value>
  <description>Expert: The maximum number of attempts per map task.
  In other words, the framework will try to execute a map task this many
  times before giving up on it.
  </description>
</property>

<property>
  <name>mapred.reduce.max.attempts</name>
  <value>4</value>
  <description>Expert: The maximum number of attempts per reduce task.
  In other words, the framework will try to execute a reduce task this
  many times before giving up on it.
  </description>
</property>

<property>
  <name>mapred.reduce.parallel.copies</name>
  <value>5</value>
  <description>The default number of parallel transfers run by reduce
  during the copy (shuffle) phase.
  </description>
</property>

<property>
  <name>mapred.task.timeout</name>
  <value>600000</value>
  <description>The number of milliseconds before a task will be
  terminated if it neither reads an input, writes an output, nor
  updates its status string.
  </description>
</property>

<property>
  <name>mapred.tasktracker.tasks.maximum</name>
  <value>2</value>
  <description>The maximum number of tasks that will be run
  simultaneously by a task tracker.
  </description>
</property>

<property>
  <name>mapred.jobtracker.completeuserjobs.maximum</name>
  <value>100</value>
  <description>The maximum number of complete jobs per user to keep around
  before delegating them to the job history.
  </description>
</property>

<property>
  <name>mapred.child.java.opts</name>
  <value>-Xmx200m</value>
  <description>Java opts for the task tracker child processes. Subsumes
  'mapred.child.heap.size' (if a mapred.child.heap.size value is found
  in a configuration, its maximum heap size will be used and a warning
  emitted that heap.size has been deprecated). Also, the following symbols,
  if present, will be interpolated: @taskid@ is replaced by the current
  TaskID, and @port@ will be replaced by mapred.task.tracker.report.port + 1
  (a second child will fail with port-in-use if
  mapred.tasktracker.tasks.maximum is greater than one). Any other
  occurrences of '@' will go unchanged. For example, to enable verbose gc
  logging to a file named for the taskid in /tmp and to set the heap
  maximum to be a gigabyte, pass a 'value' of:
        -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
  </description>
</property>
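
<!-- Example (taken from the description above): a 1GB heap with verbose
     gc logging to a per-task file; @taskid@ is interpolated by the
     framework at task launch.
<property>
  <name>mapred.child.java.opts</name>
  <value>-Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc</value>
</property>
-->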

<property>
  <name>mapred.inmem.merge.threshold</name>
  <value>1000</value>
  <description>The threshold, in terms of the number of files,
  for the in-memory merge process. When we accumulate the threshold number
  of files, we initiate the in-memory merge and spill to disk. A value of
  0 or less indicates that there is no threshold, and the merge is instead
  triggered solely by the ramfs's memory consumption.
  </description>
</property>

<property>
  <name>mapred.speculative.execution</name>
  <value>true</value>
  <description>If true, then multiple instances of some map and reduce tasks
  may be executed in parallel.</description>
</property>

<property>
  <name>mapred.min.split.size</name>
  <value>0</value>
  <description>The minimum size chunk that map input should be split
  into. Note that some file formats may have minimum split sizes that
  take priority over this setting.</description>
</property>

<property>
  <name>mapred.submit.replication</name>
  <value>10</value>
  <description>The replication level for submitted job files. This
  should be around the square root of the number of nodes.
  </description>
</property>

<property>
  <name>mapred.tasktracker.dns.interface</name>
  <value>default</value>
  <description>The name of the Network Interface from which a task
  tracker should report its IP address.
  </description>
</property>

<property>
  <name>mapred.tasktracker.dns.nameserver</name>
  <value>default</value>
  <description>The host name or IP address of the name server (DNS)
  which a TaskTracker should use to determine the host name used by
  the JobTracker for communication and display purposes.
  </description>
</property>

<property>
  <name>tasktracker.http.threads</name>
  <value>40</value>
  <description>The number of worker threads for the http server. This is
  used for map output fetching.
  </description>
</property>

<property>
  <name>tasktracker.http.bindAddress</name>
  <value>0.0.0.0</value>
  <description>
    The address to which the task tracker http server will bind.
  </description>
</property>

<property>
  <name>tasktracker.http.port</name>
  <value>50060</value>
  <description>The default port for task trackers to use as their http
  server.
  </description>
</property>

<property>
  <name>keep.failed.task.files</name>
  <value>false</value>
  <description>Should the files for failed tasks be kept? This should only
  be used on jobs that are failing, because the storage is never
  reclaimed. It also prevents the map outputs from being erased
  from the reduce directory as they are consumed.</description>
</property>

<!--
<property>
  <name>keep.task.files.pattern</name>
  <value>.*_m_123456_0</value>
  <description>Keep all files from tasks whose task names match the given
  regular expression. Defaults to none.</description>
</property>
-->

<property>
  <name>mapred.output.compress</name>
  <value>false</value>
  <description>Should the outputs of the reduces be compressed?
  </description>
</property>

<property>
  <name>mapred.output.compression.codec</name>
  <value>org.apache.hadoop.io.compress.DefaultCodec</value>
  <description>If the reduce outputs are compressed, how should they be
  compressed?
  </description>
</property>

<property>
  <name>mapred.compress.map.output</name>
  <value>false</value>
  <description>Should the outputs of the maps be compressed before being
  sent across the network? Uses SequenceFile compression.
  </description>
</property>
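
<!-- Example sketch: compressing map output trades CPU for shuffle
     bandwidth, and gzip-compressed job output trades CPU for storage;
     whether either pays off depends on the data and the cluster.
<property>
  <name>mapred.compress.map.output</name>
  <value>true</value>
</property>
<property>
  <name>mapred.output.compress</name>
  <value>true</value>
</property>
<property>
  <name>mapred.output.compression.codec</name>
  <value>org.apache.hadoop.io.compress.GzipCodec</value>
</property>
-->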

<property>
  <name>io.seqfile.compress.blocksize</name>
  <value>1000000</value>
  <description>The minimum block size for compression in block compressed
  SequenceFiles.
  </description>
</property>

<property>
  <name>io.seqfile.lazydecompress</name>
  <value>true</value>
  <description>Should values of block-compressed SequenceFiles be
  decompressed only when necessary?
  </description>
</property>

<property>
  <name>io.seqfile.sorter.recordlimit</name>
  <value>1000000</value>
  <description>The limit on the number of records to be kept in memory in
  a spill in SequenceFiles.Sorter.
  </description>
</property>

<property>
  <name>io.seqfile.compression.type</name>
  <value>RECORD</value>
  <description>The default compression type for SequenceFile.Writer.
  </description>
</property>

<property>
  <name>map.sort.class</name>
  <value>org.apache.hadoop.mapred.MergeSorter</value>
  <description>The default sort class for sorting keys.
  </description>
</property>

<property>
  <name>mapred.userlog.limit.kb</name>
  <value>0</value>
  <description>The maximum size of the user-logs of each task, in KB.
  0 disables the cap.
  </description>
</property>

<property>
  <name>mapred.userlog.retain.hours</name>
  <value>24</value>
  <description>The maximum time, in hours, for which the user-logs are to
  be retained.
  </description>
</property>

<property>
  <name>mapred.hosts</name>
  <value></value>
  <description>Names a file that contains the list of nodes that may
  connect to the jobtracker. If the value is empty, all hosts are
  permitted.</description>
</property>

<property>
  <name>mapred.hosts.exclude</name>
  <value></value>
  <description>Names a file that contains the list of hosts that
  should be excluded by the jobtracker. If the value is empty, no
  hosts are excluded.</description>
</property>

<property>
  <name>mapred.max.tracker.failures</name>
  <value>4</value>
  <description>The number of task failures on a tasktracker of a given job
  after which new tasks of that job aren't assigned to it.
  </description>
</property>

<property>
  <name>jobclient.output.filter</name>
  <value>FAILED</value>
  <description>The filter for controlling the output of the task's userlogs
  sent to the console of the JobClient.
  The permissible options are: NONE, FAILED, SUCCEEDED and ALL.
  </description>
</property>

<!-- ipc properties -->

<property>
  <name>ipc.client.timeout</name>
  <value>60000</value>
  <description>Defines the timeout for IPC calls in milliseconds.</description>
</property>

<property>
  <name>ipc.client.idlethreshold</name>
  <value>4000</value>
  <description>Defines the threshold number of connections after which
  connections will be inspected for idleness.
  </description>
</property>

<property>
  <name>ipc.client.maxidletime</name>
  <value>120000</value>
  <description>Defines the maximum idle time for a connected client after
  which it may be disconnected.
  </description>
</property>

<property>
  <name>ipc.client.kill.max</name>
  <value>10</value>
  <description>Defines the maximum number of clients to disconnect in one
  go.
  </description>
</property>

<property>
  <name>ipc.client.connection.maxidletime</name>
  <value>1000</value>
  <description>The maximum time after which a client will bring down the
  connection to the server.
  </description>
</property>

<property>
  <name>ipc.client.connect.max.retries</name>
  <value>10</value>
  <description>Indicates the number of retries a client will make to
  establish a server connection.
  </description>
</property>

<property>
  <name>ipc.server.listen.queue.size</name>
  <value>128</value>
  <description>Indicates the length of the listen queue for servers
  accepting client connections.
  </description>
</property>

<!-- Job Notification Configuration -->

<!--
<property>
  <name>job.end.notification.url</name>
  <value>http://localhost:8080/jobstatus.php?jobId=$jobId&jobStatus=$jobStatus</value>
  <description>Indicates the url which will be called on completion of a
  job to inform the end status of the job.
  The user can give at most 2 variables with the URI: $jobId and
  $jobStatus. If they are present in the URI, they will be replaced by
  their respective values.
  </description>
</property>
-->

<property>
  <name>job.end.retry.attempts</name>
  <value>0</value>
  <description>Indicates how many times hadoop should attempt to contact
  the notification URL.</description>
</property>

<property>
  <name>job.end.retry.interval</name>
  <value>30000</value>
  <description>Indicates the time in milliseconds between notification URL
  retry calls.</description>
</property>

<!-- Web Interface Configuration -->

<property>
  <name>webinterface.private.actions</name>
  <value>false</value>
  <description>If set to true, the web interfaces of JT and NN may contain
  actions, such as kill job, delete file, etc., that should
  not be exposed to the public. Enable this option if the interfaces
  are only reachable by those who have the right authorization.
  </description>
</property>

</configuration>