- <?xml version="1.0"?>
- <!--
- Copyright 2002-2004 The Apache Software Foundation
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
- <!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN" "http://forrest.apache.org/dtd/document-v20.dtd">
- <document>
-
- <header>
- <title>Hadoop Map-Reduce Tutorial</title>
- </header>
-
- <body>
-
- <section>
- <title>Purpose</title>
-
- <p>This document comprehensively describes all user-facing facets of the
- Hadoop Map-Reduce framework and serves as a tutorial.
- </p>
- </section>
-
- <section>
- <title>Pre-requisites</title>
-
- <p>Ensure that Hadoop is installed, configured and running. More
- details:</p>
- <ul>
- <li>
- Hadoop <a href="quickstart.html">Quickstart</a> for first-time users.
- </li>
- <li>
- Hadoop <a href="cluster_setup.html">Cluster Setup</a> for large,
- distributed clusters.
- </li>
- </ul>
- </section>
-
- <section>
- <title>Overview</title>
-
- <p>Hadoop Map-Reduce is a software framework for easily writing
- applications which process vast amounts of data (multi-terabyte data-sets)
- in-parallel on large clusters (thousands of nodes) of commodity
- hardware in a reliable, fault-tolerant manner.</p>
-
- <p>A Map-Reduce <em>job</em> usually splits the input data-set into
- independent chunks which are processed by the <em>map tasks</em> in a
- completely parallel manner. The framework sorts the outputs of the maps,
- which are then input to the <em>reduce tasks</em>. Typically both the
- input and the output of the job are stored in a file-system. The framework
- takes care of scheduling tasks, monitoring them and re-executing the failed
- tasks.</p>
-
- <p>Typically the compute nodes and the storage nodes are the same, that is,
- the Map-Reduce framework and the <a href="hdfs_design.html">Distributed
- FileSystem</a> are running on the same set of nodes. This configuration
- allows the framework to effectively schedule tasks on the nodes where data
- is already present, resulting in very high aggregate bandwidth across the
- cluster.</p>
-
- <p>The Map-Reduce framework consists of a single master
- <code>JobTracker</code> and one slave <code>TaskTracker</code> per
- cluster-node. The master is responsible for scheduling the jobs' component
- tasks on the slaves, monitoring them and re-executing the failed tasks. The
- slaves execute the tasks as directed by the master.</p>
-
- <p>Minimally, applications specify the input/output locations and supply
- <em>map</em> and <em>reduce</em> functions via implementations of
- appropriate interfaces and/or abstract-classes. These, and other job
- parameters, comprise the <em>job configuration</em>. The Hadoop
- <em>job client</em> then submits the job (jar/executable etc.) and
- configuration to the <code>JobTracker</code> which then assumes the
- responsibility of distributing the software/configuration to the slaves,
- scheduling tasks and monitoring them, providing status and diagnostic
- information to the job-client.</p>
-
- <p>Although the Hadoop framework is implemented in Java<sup>TM</sup>,
- Map-Reduce applications need not be written in Java.</p>
- <ul>
- <li>
- <a href="ext:api/org/apache/hadoop/streaming/package-summary">
- Hadoop Streaming</a> is a utility which allows users to create and run
- jobs with any executables (e.g. shell utilities) as the mapper and/or
- the reducer.
- </li>
- <li>
- <a href="ext:api/org/apache/hadoop/mapred/pipes/package-summary">
- Hadoop Pipes</a> is a <a href="http://www.swig.org/">SWIG</a>-
- compatible <em>C++ API</em> to implement Map-Reduce applications (non
- JNI<sup>TM</sup> based).
- </li>
- </ul>
- </section>
-
- <section>
- <title>Inputs and Outputs</title>
- <p>The Map-Reduce framework operates exclusively on
- <code><key, value></code> pairs, that is, the framework views the
- input to the job as a set of <code><key, value></code> pairs and
- produces a set of <code><key, value></code> pairs as the output of
- the job, conceivably of different types.</p>
-
- <p>The <code>key</code> and <code>value</code> classes have to be
- serializable by the framework and hence need to implement the
- <a href="ext:api/org/apache/hadoop/io/writable">Writable</a>
- interface. Additionally, the <code>key</code> classes have to implement the
- <a href="ext:api/org/apache/hadoop/io/writablecomparable">
- WritableComparable</a> interface to facilitate sorting by the framework.
- </p>
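- <p>For illustration, here is a minimal sketch of a custom key type; the
- class name and fields are hypothetical and not part of the framework:</p>
- <p>
- <code>import java.io.DataInput;</code><br/>
- <code>import java.io.DataOutput;</code><br/>
- <code>import java.io.IOException;</code><br/>
- <code>import org.apache.hadoop.io.WritableComparable;</code><br/>
- <br/>
- <code>public class WordPair implements WritableComparable {</code><br/>
- <code>  private String first = "";</code><br/>
- <code>  private String second = "";</code><br/>
- <code>  // serialize the key for the framework</code><br/>
- <code>  public void write(DataOutput out) throws IOException {</code><br/>
- <code>    out.writeUTF(first); out.writeUTF(second);</code><br/>
- <code>  }</code><br/>
- <code>  // deserialize the key</code><br/>
- <code>  public void readFields(DataInput in) throws IOException {</code><br/>
- <code>    first = in.readUTF(); second = in.readUTF();</code><br/>
- <code>  }</code><br/>
- <code>  // ordering used by the framework when sorting keys</code><br/>
- <code>  public int compareTo(Object o) {</code><br/>
- <code>    WordPair other = (WordPair) o;</code><br/>
- <code>    int cmp = first.compareTo(other.first);</code><br/>
- <code>    return (cmp != 0) ? cmp : second.compareTo(other.second);</code><br/>
- <code>  }</code><br/>
- <code>  // keep hashCode consistent so the default HashPartitioner works</code><br/>
- <code>  public int hashCode() { return first.hashCode() * 163 + second.hashCode(); }</code><br/>
- <code>}</code>
- </p>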
- <p>Input and Output types of a Map-Reduce job:</p>
- <p>
- (input) <code><k1, v1></code>
- ->
- <strong>map</strong>
- ->
- <code><k2, v2></code>
- ->
- <strong>combine</strong>
- ->
- <code><k2, v2></code>
- ->
- <strong>reduce</strong>
- ->
- <code><k3, v3></code> (output)
- </p>
- </section>
- <section>
- <title>Example: WordCount v1.0</title>
-
- <p>Before we jump into the details, let's walk through an example Map-Reduce
- application to get a flavour for how they work.</p>
-
- <p><code>WordCount</code> is a simple application that counts the number of
- occurrences of each word in a given input set.</p>
-
- <p>This works with a
- <a href="quickstart.html#Standalone+Operation">local-standalone</a>,
- <a href="quickstart.html#SingleNodeSetup">pseudo-distributed</a> or
- <a href="quickstart.html#Fully-Distributed+Operation">fully-distributed</a>
- Hadoop installation.</p>
-
- <section>
- <title>Source Code</title>
-
- <table>
- <tr>
- <th></th>
- <th>WordCount.java</th>
- </tr>
- <tr>
- <td>1.</td>
- <td>
- <code>package org.myorg;</code>
- </td>
- </tr>
- <tr>
- <td>2.</td>
- <td></td>
- </tr>
- <tr>
- <td>3.</td>
- <td>
- <code>import java.io.IOException;</code>
- </td>
- </tr>
- <tr>
- <td>4.</td>
- <td>
- <code>import java.util.*;</code>
- </td>
- </tr>
- <tr>
- <td>5.</td>
- <td></td>
- </tr>
- <tr>
- <td>6.</td>
- <td>
- <code>import org.apache.hadoop.fs.Path;</code>
- </td>
- </tr>
- <tr>
- <td>7.</td>
- <td>
- <code>import org.apache.hadoop.conf.*;</code>
- </td>
- </tr>
- <tr>
- <td>8.</td>
- <td>
- <code>import org.apache.hadoop.io.*;</code>
- </td>
- </tr>
- <tr>
- <td>9.</td>
- <td>
- <code>import org.apache.hadoop.mapred.*;</code>
- </td>
- </tr>
- <tr>
- <td>10.</td>
- <td>
- <code>import org.apache.hadoop.util.*;</code>
- </td>
- </tr>
- <tr>
- <td>11.</td>
- <td></td>
- </tr>
- <tr>
- <td>12.</td>
- <td>
- <code>public class WordCount {</code>
- </td>
- </tr>
- <tr>
- <td>13.</td>
- <td></td>
- </tr>
- <tr>
- <td>14.</td>
- <td>
-
- <code>
- public static class Map extends MapReduceBase
- implements Mapper<LongWritable, Text, Text, IntWritable> {
- </code>
- </td>
- </tr>
- <tr>
- <td>15.</td>
- <td>
-
- <code>
- private final static IntWritable one = new IntWritable(1);
- </code>
- </td>
- </tr>
- <tr>
- <td>16.</td>
- <td>
-
- <code>private Text word = new Text();</code>
- </td>
- </tr>
- <tr>
- <td>17.</td>
- <td></td>
- </tr>
- <tr>
- <td>18.</td>
- <td>
-
- <code>
- public void map(LongWritable key, Text value,
- OutputCollector<Text, IntWritable> output,
- Reporter reporter) throws IOException {
- </code>
- </td>
- </tr>
- <tr>
- <td>19.</td>
- <td>
-
- <code>String line = value.toString();</code>
- </td>
- </tr>
- <tr>
- <td>20.</td>
- <td>
-
- <code>StringTokenizer tokenizer = new StringTokenizer(line);</code>
- </td>
- </tr>
- <tr>
- <td>21.</td>
- <td>
-
- <code>while (tokenizer.hasMoreTokens()) {</code>
- </td>
- </tr>
- <tr>
- <td>22.</td>
- <td>
-
- <code>word.set(tokenizer.nextToken());</code>
- </td>
- </tr>
- <tr>
- <td>23.</td>
- <td>
-
- <code>output.collect(word, one);</code>
- </td>
- </tr>
- <tr>
- <td>24.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>25.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>26.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>27.</td>
- <td></td>
- </tr>
- <tr>
- <td>28.</td>
- <td>
-
- <code>
- public static class Reduce extends MapReduceBase implements
- Reducer<Text, IntWritable, Text, IntWritable> {
- </code>
- </td>
- </tr>
- <tr>
- <td>29.</td>
- <td>
-
- <code>
- public void reduce(Text key, Iterator<IntWritable> values,
- OutputCollector<Text, IntWritable> output,
- Reporter reporter) throws IOException {
- </code>
- </td>
- </tr>
- <tr>
- <td>30.</td>
- <td>
-
- <code>int sum = 0;</code>
- </td>
- </tr>
- <tr>
- <td>31.</td>
- <td>
-
- <code>while (values.hasNext()) {</code>
- </td>
- </tr>
- <tr>
- <td>32.</td>
- <td>
-
- <code>sum += values.next().get();</code>
- </td>
- </tr>
- <tr>
- <td>33.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>34.</td>
- <td>
-
- <code>output.collect(key, new IntWritable(sum));</code>
- </td>
- </tr>
- <tr>
- <td>35.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>36.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>37.</td>
- <td></td>
- </tr>
- <tr>
- <td>38.</td>
- <td>
-
- <code>
- public static void main(String[] args) throws Exception {
- </code>
- </td>
- </tr>
- <tr>
- <td>39.</td>
- <td>
-
- <code>
- JobConf conf = new JobConf(WordCount.class);
- </code>
- </td>
- </tr>
- <tr>
- <td>40.</td>
- <td>
-
- <code>conf.setJobName("wordcount");</code>
- </td>
- </tr>
- <tr>
- <td>41.</td>
- <td></td>
- </tr>
- <tr>
- <td>42.</td>
- <td>
-
- <code>conf.setOutputKeyClass(Text.class);</code>
- </td>
- </tr>
- <tr>
- <td>43.</td>
- <td>
-
- <code>conf.setOutputValueClass(IntWritable.class);</code>
- </td>
- </tr>
- <tr>
- <td>44.</td>
- <td></td>
- </tr>
- <tr>
- <td>45.</td>
- <td>
-
- <code>conf.setMapperClass(Map.class);</code>
- </td>
- </tr>
- <tr>
- <td>46.</td>
- <td>
-
- <code>conf.setCombinerClass(Reduce.class);</code>
- </td>
- </tr>
- <tr>
- <td>47.</td>
- <td>
-
- <code>conf.setReducerClass(Reduce.class);</code>
- </td>
- </tr>
- <tr>
- <td>48.</td>
- <td></td>
- </tr>
- <tr>
- <td>49.</td>
- <td>
-
- <code>conf.setInputFormat(TextInputFormat.class);</code>
- </td>
- </tr>
- <tr>
- <td>50.</td>
- <td>
-
- <code>conf.setOutputFormat(TextOutputFormat.class);</code>
- </td>
- </tr>
- <tr>
- <td>51.</td>
- <td></td>
- </tr>
- <tr>
- <td>52.</td>
- <td>
-
- <code>FileInputFormat.setInputPaths(conf, new Path(args[0]));</code>
- </td>
- </tr>
- <tr>
- <td>53.</td>
- <td>
-
- <code>FileOutputFormat.setOutputPath(conf, new Path(args[1]));</code>
- </td>
- </tr>
- <tr>
- <td>54.</td>
- <td></td>
- </tr>
- <tr>
- <td>55.</td>
- <td>
-
- <code>JobClient.runJob(conf);</code>
- </td>
- </tr>
- <tr>
- <td>56.</td>
- <td></td>
- </tr>
- <tr>
- <td>57.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>58.</td>
- <td>
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>59.</td>
- <td></td>
- </tr>
- </table>
- </section>
-
- <section>
- <title>Usage</title>
-
- <p>Assuming <code>HADOOP_HOME</code> is the root of the installation and
- <code>HADOOP_VERSION</code> is the Hadoop version installed, compile
- <code>WordCount.java</code> and create a jar:</p>
- <p>
- <code>$ mkdir wordcount_classes</code><br/>
- <code>
- $ javac -classpath ${HADOOP_HOME}/hadoop-${HADOOP_VERSION}-core.jar
- -d wordcount_classes WordCount.java
- </code><br/>
- <code>$ jar -cvf /usr/joe/wordcount.jar -C wordcount_classes/ .</code>
- </p>
-
- <p>Assuming that:</p>
- <ul>
- <li>
- <code>/usr/joe/wordcount/input</code> - input directory in HDFS
- </li>
- <li>
- <code>/usr/joe/wordcount/output</code> - output directory in HDFS
- </li>
- </ul>
-
- <p>Sample text-files as input:</p>
- <p>
- <code>$ bin/hadoop dfs -ls /usr/joe/wordcount/input/</code><br/>
- <code>/usr/joe/wordcount/input/file01</code><br/>
- <code>/usr/joe/wordcount/input/file02</code><br/>
- <br/>
- <code>$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file01</code><br/>
- <code>Hello World Bye World</code><br/>
- <br/>
- <code>$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file02</code><br/>
- <code>Hello Hadoop Goodbye Hadoop</code>
- </p>
- <p>Run the application:</p>
- <p>
- <code>
- $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount
- /usr/joe/wordcount/input /usr/joe/wordcount/output
- </code>
- </p>
- <p>Output:</p>
- <p>
- <code>
- $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
- </code>
- <br/>
- <code>Bye 1</code><br/>
- <code>Goodbye 1</code><br/>
- <code>Hadoop 2</code><br/>
- <code>Hello 2</code><br/>
- <code>World 2</code><br/>
- </p>
- </section>
-
- <section>
- <title>Walk-through</title>
-
- <p>The <code>WordCount</code> application is quite straight-forward.</p>
-
- <p>The <code>Mapper</code> implementation (lines 14-26), via the
- <code>map</code> method (lines 18-25), processes one line at a time,
- as provided by the specified <code>TextInputFormat</code> (line 49).
- It then splits the line into tokens separated by whitespaces, via the
- <code>StringTokenizer</code>, and emits a key-value pair of
- <code>< <word>, 1></code>.</p>
-
- <p>
- For the given sample input the first map emits:<br/>
- <code>< Hello, 1></code><br/>
- <code>< World, 1></code><br/>
- <code>< Bye, 1></code><br/>
- <code>< World, 1></code><br/>
- </p>
-
- <p>
- The second map emits:<br/>
- <code>< Hello, 1></code><br/>
- <code>< Hadoop, 1></code><br/>
- <code>< Goodbye, 1></code><br/>
- <code>< Hadoop, 1></code><br/>
- </p>
-
- <p>We'll learn more about the number of maps spawned for a given job, and
- how to control them in a fine-grained manner, a bit later in the
- tutorial.</p>
-
- <p><code>WordCount</code> also specifies a <code>combiner</code> (line
- 46). Hence, the output of each map is passed through the local combiner
- (which is the same as the <code>Reducer</code>, as per the job
- configuration) for local aggregation, after being sorted on the
- <em>key</em>s.</p>
- <p>
- The output of the first map:<br/>
- <code>< Bye, 1></code><br/>
- <code>< Hello, 1></code><br/>
- <code>< World, 2></code><br/>
- </p>
-
- <p>
- The output of the second map:<br/>
- <code>< Goodbye, 1></code><br/>
- <code>< Hadoop, 2></code><br/>
- <code>< Hello, 1></code><br/>
- </p>
- <p>The <code>Reducer</code> implementation (lines 28-36), via the
- <code>reduce</code> method (lines 29-35), just sums up the values,
- which are the occurrence counts for each key (i.e. words in this example).
- </p>
-
- <p>
- Thus the output of the job is:<br/>
- <code>< Bye, 1></code><br/>
- <code>< Goodbye, 1></code><br/>
- <code>< Hadoop, 2></code><br/>
- <code>< Hello, 2></code><br/>
- <code>< World, 2></code><br/>
- </p>
-
- <p>The <code>main</code> method specifies various facets of the job, such
- as the input/output paths (passed via the command line), key/value
- types, input/output formats etc., in the <code>JobConf</code>.
- It then calls <code>JobClient.runJob</code> (line 55) to submit the job
- and monitor its progress.</p>
- <p>We'll learn more about <code>JobConf</code>, <code>JobClient</code>,
- <code>Tool</code> and other interfaces and classes a bit later in the
- tutorial.</p>
- </section>
- </section>
-
- <section>
- <title>Map-Reduce - User Interfaces</title>
-
- <p>This section provides a reasonable amount of detail on every user-facing
- aspect of the Map-Reduce framework. This should help users implement,
- configure and tune their jobs in a fine-grained manner. However, please
- note that the javadoc for each class/interface remains the most
- comprehensive documentation available; this is only meant to be a tutorial.
- </p>
-
- <p>Let us first take the <code>Mapper</code> and <code>Reducer</code>
- interfaces. Applications typically implement them to provide the
- <code>map</code> and <code>reduce</code> methods.</p>
-
- <p>We will then discuss other core interfaces including
- <code>JobConf</code>, <code>JobClient</code>, <code>Partitioner</code>,
- <code>OutputCollector</code>, <code>Reporter</code>,
- <code>InputFormat</code>, <code>OutputFormat</code> and others.</p>
-
- <p>Finally, we will wrap up by discussing some useful features of the
- framework such as the <code>DistributedCache</code>,
- <code>IsolationRunner</code> etc.</p>
- <section>
- <title>Payload</title>
-
- <p>Applications typically implement the <code>Mapper</code> and
- <code>Reducer</code> interfaces to provide the <code>map</code> and
- <code>reduce</code> methods. These form the core of the job.</p>
-
- <section>
- <title>Mapper</title>
- <p><a href="ext:api/org/apache/hadoop/mapred/mapper">
- Mapper</a> maps input key/value pairs to a set of intermediate
- key/value pairs.</p>
-
- <p>Maps are the individual tasks that transform input records into
- intermediate records. The transformed intermediate records do not need
- to be of the same type as the input records. A given input pair may
- map to zero or many output pairs.</p>
-
- <p>The Hadoop Map-Reduce framework spawns one map task for each
- <code>InputSplit</code> generated by the <code>InputFormat</code> for
- the job.</p>
-
- <p>Overall, <code>Mapper</code> implementations are passed the
- <code>JobConf</code> for the job via the
- <a href="ext:api/org/apache/hadoop/mapred/jobconfigurable/configure">
- JobConfigurable.configure(JobConf)</a> method and can override it to
- initialize themselves. The framework then calls
- <a href="ext:api/org/apache/hadoop/mapred/mapper/map">
- map(WritableComparable, Writable, OutputCollector, Reporter)</a> for
- each key/value pair in the <code>InputSplit</code> for that task.
- Applications can then override the
- <a href="ext:api/org/apache/hadoop/io/closeable/close">
- Closeable.close()</a> method to perform any required cleanup.</p>
-
- <p>Output pairs do not need to be of the same types as input pairs. A
- given input pair may map to zero or many output pairs. Output pairs
- are collected with calls to
- <a href="ext:api/org/apache/hadoop/mapred/outputcollector/collect">
- OutputCollector.collect(WritableComparable,Writable)</a>.</p>
- <p>Applications can use the <code>Reporter</code> to report
- progress, set application-level status messages and update
- <code>Counters</code>, or just indicate that they are alive.</p>
-
- <p>All intermediate values associated with a given output key are
- subsequently grouped by the framework, and passed to the
- <code>Reducer</code>(s) to determine the final output. Users can
- control the grouping by specifying a <code>Comparator</code> via
- <a href="ext:api/org/apache/hadoop/mapred/jobconf/setoutputkeycomparatorclass">
- JobConf.setOutputKeyComparatorClass(Class)</a>.</p>
- <p>The <code>Mapper</code> outputs are sorted and then
- partitioned per <code>Reducer</code>. The total number of partitions is
- the same as the number of reduce tasks for the job. Users can control
- which keys (and hence records) go to which <code>Reducer</code> by
- implementing a custom <code>Partitioner</code>.</p>
-
- <p>Users can optionally specify a <code>combiner</code>, via
- <a href="ext:api/org/apache/hadoop/mapred/jobconf/setcombinerclass">
- JobConf.setCombinerClass(Class)</a>, to perform local aggregation of
- the intermediate outputs, which helps to cut down the amount of data
- transferred from the <code>Mapper</code> to the <code>Reducer</code>.
- </p>
-
- <p>The intermediate, sorted outputs are always stored in files of
- <a href="ext:api/org/apache/hadoop/io/sequencefile">
- SequenceFile</a> format. Applications can control if, and how, the
- intermediate outputs are to be compressed and the
- <a href="ext:api/org/apache/hadoop/io/compress/compressioncodec">
- CompressionCodec</a> to be used via the <code>JobConf</code>.
- </p>
-
- <section>
- <title>How Many Maps?</title>
-
- <p>The number of maps is usually driven by the total size of the
- inputs, that is, the total number of blocks of the input files.</p>
-
- <p>The right level of parallelism for maps seems to be around 10-100
- maps per-node, although it has been set up to 300 maps for very
- cpu-light map tasks. Task setup takes a while, so it is best if the
- maps take at least a minute to execute.</p>
-
- <p>Thus, if you expect 10TB of input data and have a blocksize of
- <code>128MB</code>, you'll end up with 82,000 maps, unless
- <a href="ext:api/org/apache/hadoop/mapred/jobconf/setnummaptasks">
- setNumMapTasks(int)</a> (which only provides a hint to the framework)
- is used to set it even higher.</p>
- </section>
- </section>
-
- <section>
- <title>Reducer</title>
-
- <p><a href="ext:api/org/apache/hadoop/mapred/reducer">
- Reducer</a> reduces a set of intermediate values which share a key to
- a smaller set of values.</p>
-
- <p>The number of reduces for the job is set by the user
- via <a href="ext:api/org/apache/hadoop/mapred/jobconf/setnumreducetasks">
- JobConf.setNumReduceTasks(int)</a>.</p>
-
- <p>Overall, <code>Reducer</code> implementations are passed the
- <code>JobConf</code> for the job via the
- <a href="ext:api/org/apache/hadoop/mapred/jobconfigurable/configure">
- JobConfigurable.configure(JobConf)</a> method and can override it to
- initialize themselves. The framework then calls
- <a href="ext:api/org/apache/hadoop/mapred/reducer/reduce">
- reduce(WritableComparable, Iterator, OutputCollector, Reporter)</a>
- method for each <code><key, (list of values)></code>
- pair in the grouped inputs. Applications can then override the
- <a href="ext:api/org/apache/hadoop/io/closeable/close">
- Closeable.close()</a> method to perform any required cleanup.</p>
- <p><code>Reducer</code> has 3 primary phases: shuffle, sort and reduce.
- </p>
-
- <section>
- <title>Shuffle</title>
-
- <p>Input to the <code>Reducer</code> is the sorted output of the
- mappers. In this phase the framework fetches the relevant partition
- of the output of all the mappers, via HTTP.</p>
- </section>
-
- <section>
- <title>Sort</title>
-
- <p>The framework groups <code>Reducer</code> inputs by keys (since
- different mappers may have output the same key) in this stage.</p>
-
- <p>The shuffle and sort phases occur simultaneously; while
- map-outputs are being fetched they are merged.</p>
-
- <section>
- <title>Secondary Sort</title>
-
- <p>If equivalence rules for grouping the intermediate keys are
- required to be different from those for grouping keys before
- reduction, then one may specify a <code>Comparator</code> via
- <a href="ext:api/org/apache/hadoop/mapred/jobconf/setoutputvaluegroupingcomparator">
- JobConf.setOutputValueGroupingComparator(Class)</a>. Since
- <a href="ext:api/org/apache/hadoop/mapred/jobconf/setoutputkeycomparatorclass">
- JobConf.setOutputKeyComparatorClass(Class)</a> can be used to
- control how intermediate keys are grouped, these can be used in
- conjunction to simulate <em>secondary sort on values</em>.</p>
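- <p>As a configuration sketch; the comparator classes named below are
- hypothetical, application-supplied <code>WritableComparator</code>
- implementations, not part of the framework:</p>
- <p>
- <code>// sort the intermediate keys by (natural key, secondary field)</code><br/>
- <code>conf.setOutputKeyComparatorClass(FullKeyComparator.class);</code><br/>
- <code>// group values passed to a single reduce() call by the natural key only</code><br/>
- <code>conf.setOutputValueGroupingComparator(NaturalKeyGroupingComparator.class);</code>
- </p>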
- </section>
- </section>
-
- <section>
- <title>Reduce</title>
-
- <p>In this phase the
- <a href="ext:api/org/apache/hadoop/mapred/reducer/reduce">
- reduce(WritableComparable, Iterator, OutputCollector, Reporter)</a>
- method is called for each <code><key, (list of values)></code>
- pair in the grouped inputs.</p>
-
- <p>The output of the reduce task is typically written to the
- <a href="ext:api/org/apache/hadoop/fs/filesystem">
- FileSystem</a> via
- <a href="ext:api/org/apache/hadoop/mapred/outputcollector/collect">
- OutputCollector.collect(WritableComparable, Writable)</a>.</p>
-
- <p>Applications can use the <code>Reporter</code> to report
- progress, set application-level status messages and update
- <code>Counters</code>, or just indicate that they are alive.</p>
-
- <p>The output of the <code>Reducer</code> is <em>not sorted</em>.</p>
- </section>
-
- <section>
- <title>How Many Reduces?</title>
-
- <p>The right number of reduces seems to be <code>0.95</code> or
- <code>1.75</code> multiplied by (<<em>no. of nodes</em>> *
- <code>mapred.tasktracker.reduce.tasks.maximum</code>).</p>
-
- <p>With <code>0.95</code> all of the reduces can launch immediately
- and start transferring map outputs as the maps finish. With
- <code>1.75</code> the faster nodes will finish their first round of
- reduces and launch a second wave of reduces doing a much better job
- of load balancing.</p>
-
- <p>Increasing the number of reduces increases the framework overhead,
- but improves load balancing and lowers the cost of failures.</p>
-
- <p>The scaling factors above are slightly less than whole numbers to
- reserve a few reduce slots in the framework for speculative-tasks and
- failed tasks.</p>
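- <p>As a small, illustrative sketch (the cluster numbers below are made up):</p>
- <p>
- <code>int numNodes = 10;            // illustrative cluster size</code><br/>
- <code>int reduceSlotsPerNode = 2;   // mapred.tasktracker.reduce.tasks.maximum</code><br/>
- <code>// 0.95 * 10 * 2 = 19 reduces, leaving slots free for re-executions</code><br/>
- <code>conf.setNumReduceTasks((int) (0.95 * numNodes * reduceSlotsPerNode));</code>
- </p>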
- </section>
-
- <section>
- <title>Reducer NONE</title>
-
- <p>It is legal to set the number of reduce-tasks to <em>zero</em> if
- no reduction is desired.</p>
-
- <p>In this case the outputs of the map-tasks go directly to the
- <code>FileSystem</code>, into the output path set by
- <a href="ext:api/org/apache/hadoop/mapred/fileoutputformat/setoutputpath">
- setOutputPath(Path)</a>. The framework does not sort the
- map-outputs before writing them out to the <code>FileSystem</code>.
- </p>
- </section>
- </section>
-
- <section>
- <title>Partitioner</title>
-
- <p><a href="ext:api/org/apache/hadoop/mapred/partitioner">
- Partitioner</a> partitions the key space.</p>
- <p>Partitioner controls the partitioning of the keys of the
- intermediate map-outputs. The key (or a subset of the key) is used to
- derive the partition, typically by a <em>hash function</em>. The total
- number of partitions is the same as the number of reduce tasks for the
- job. Hence this controls which of the <code>m</code> reduce tasks the
- intermediate key (and hence the record) is sent to for reduction.</p>
-
- <p><a href="ext:api/org/apache/hadoop/mapred/lib/hashpartitioner">
- HashPartitioner</a> is the default <code>Partitioner</code>.</p>
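- <p>For illustration, a minimal sketch of a custom <code>Partitioner</code>;
- the class name and the routing rule are hypothetical:</p>
- <p>
- <code>import org.apache.hadoop.io.IntWritable;</code><br/>
- <code>import org.apache.hadoop.io.Text;</code><br/>
- <code>import org.apache.hadoop.mapred.JobConf;</code><br/>
- <code>import org.apache.hadoop.mapred.Partitioner;</code><br/>
- <br/>
- <code>public class FirstCharPartitioner implements Partitioner<Text, IntWritable> {</code><br/>
- <code>  public void configure(JobConf job) { }  // no configuration needed</code><br/>
- <code>  // route each key to a reduce task based on its first character</code><br/>
- <code>  public int getPartition(Text key, IntWritable value, int numPartitions) {</code><br/>
- <code>    String s = key.toString();</code><br/>
- <code>    int firstChar = (s.length() == 0) ? 0 : s.charAt(0);</code><br/>
- <code>    return firstChar % numPartitions;</code><br/>
- <code>  }</code><br/>
- <code>}</code>
- </p>
- <p>Such a class would be registered on the job via
- <code>conf.setPartitionerClass(FirstCharPartitioner.class)</code>.</p>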
- </section>
-
- <section>
- <title>Reporter</title>
-
- <p><a href="ext:api/org/apache/hadoop/mapred/reporter">
- Reporter</a> is a facility for Map-Reduce applications to report
- progress, set application-level status messages and update
- <code>Counters</code>.</p>
-
- <p><code>Mapper</code> and <code>Reducer</code> implementations can use
- the <code>Reporter</code> to report progress or just indicate
- that they are alive. In scenarios where the application takes a
- significant amount of time to process individual key/value pairs,
- this is crucial since the framework might assume that the task has
- timed out and kill that task. Another way to avoid this is to
- set the configuration parameter <code>mapred.task.timeout</code> to a
- high-enough value (or even set it to <em>zero</em> for no time-outs).
- </p>
- <p>Applications can also update <code>Counters</code> using the
- <code>Reporter</code>.</p>
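- <p>As a hedged fragment (assuming a <code>Mapper</code> like the WordCount
- example above), progress reporting might look like:</p>
- <p>
- <code>public void map(LongWritable key, Text value,</code><br/>
- <code>                OutputCollector<Text, IntWritable> output,</code><br/>
- <code>                Reporter reporter) throws IOException {</code><br/>
- <code>  // ... potentially long-running per-record processing ...</code><br/>
- <code>  reporter.progress();  // tell the framework the task is still alive</code><br/>
- <code>  reporter.setStatus("processing record at offset " + key.get());</code><br/>
- <code>}</code>
- </p>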
- </section>
-
- <section>
- <title>OutputCollector</title>
-
- <p><a href="ext:api/org/apache/hadoop/mapred/outputcollector">
- OutputCollector</a> is a generalization of the facility provided by
- the Map-Reduce framework to collect data output by the
- <code>Mapper</code> or the <code>Reducer</code> (either the
- intermediate outputs or the output of the job).</p>
- </section>
-
- <p>Hadoop Map-Reduce comes bundled with a
- <a href="ext:api/org/apache/hadoop/mapred/lib/package-summary">
- library</a> of generally useful mappers, reducers, and partitioners.</p>
- </section>
-
- <section>
- <title>Job Configuration</title>
-
- <p><a href="ext:api/org/apache/hadoop/mapred/jobconf">
- JobConf</a> represents a Map-Reduce job configuration.</p>
-
- <p><code>JobConf</code> is the primary interface for a user to describe
- a map-reduce job to the Hadoop framework for execution. The framework
- tries to faithfully execute the job as described by <code>JobConf</code>,
- however:</p>
- <ul>
- <li>
- Some configuration parameters may have been marked as
- <a href="ext:api/org/apache/hadoop/conf/configuration/final_parameters">
- final</a> by administrators and hence cannot be altered.
- </li>
- <li>
- While some job parameters are straight-forward to set (e.g.
- <a href="ext:api/org/apache/hadoop/mapred/jobconf/setnumreducetasks">
- setNumReduceTasks(int)</a>), other parameters interact subtly with
- the rest of the framework and/or job configuration and are
- more complex to set (e.g.
- <a href="ext:api/org/apache/hadoop/mapred/jobconf/setnummaptasks">
- setNumMapTasks(int)</a>).
- </li>
- </ul>
-
- <p><code>JobConf</code> is typically used to specify the
- <code>Mapper</code>, combiner (if any), <code>Partitioner</code>,
- <code>Reducer</code>, <code>InputFormat</code> and
- <code>OutputFormat</code> implementations. <code>JobConf</code> also
- indicates the set of input files
- (<a href="ext:api/org/apache/hadoop/mapred/fileinputformat/setinputpaths">setInputPaths(JobConf, Path...)</a>
- /<a href="ext:api/org/apache/hadoop/mapred/fileinputformat/addinputpath">addInputPath(JobConf, Path)</a>)
- and (<a href="ext:api/org/apache/hadoop/mapred/fileinputformat/setinputpathstring">setInputPaths(JobConf, String)</a>
- /<a href="ext:api/org/apache/hadoop/mapred/fileinputformat/addinputpathstring">addInputPaths(JobConf, String)</a>)
- and where the output files should be written
- (<a href="ext:api/org/apache/hadoop/mapred/fileoutputformat/setoutputpath">setOutputPath(Path)</a>).</p>
- <p>Optionally, <code>JobConf</code> is used to specify other advanced
- facets of the job such as the <code>Comparator</code> to be used, files
- to be put in the <code>DistributedCache</code>, whether intermediate
- and/or job outputs are to be compressed (and how), debugging via
- user-provided scripts
- (<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmapdebugscript">setMapDebugScript(String)</a>/<a href="ext:api/org/apache/hadoop/mapred/jobconf/setreducedebugscript">setReduceDebugScript(String)</a>)
- , whether job tasks can be executed in a <em>speculative</em> manner
- (<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmapspeculativeexecution">setMapSpeculativeExecution(boolean)</a>)/(<a href="ext:api/org/apache/hadoop/mapred/jobconf/setreducespeculativeexecution">setReduceSpeculativeExecution(boolean)</a>)
- , maximum number of attempts per task
- (<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmaxmapattempts">setMaxMapAttempts(int)</a>/<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmaxreduceattempts">setMaxReduceAttempts(int)</a>)
- , percentage of task failures that can be tolerated by the job
- (<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmaxmaptaskfailurespercent">setMaxMapTaskFailuresPercent(int)</a>/<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmaxreducetaskfailurespercent">setMaxReduceTaskFailuresPercent(int)</a>)
- etc.</p>
-
- <p>Of course, users can use
- <a href="ext:api/org/apache/hadoop/conf/configuration/set">set(String, String)</a>/<a href="ext:api/org/apache/hadoop/conf/configuration/get">get(String, String)</a>
- to set/get arbitrary parameters needed by applications. However, use the
- <code>DistributedCache</code> for large amounts of (read-only) data.</p>
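- <p>For example, a sketch of passing a hypothetical application parameter
- (the property name below is made up) from the job driver to a task:</p>
- <p>
- <code>// in the job driver</code><br/>
- <code>conf.set("myapp.filter.pattern", "foo.*");</code><br/>
- <br/>
- <code>// in a Mapper/Reducer extending MapReduceBase</code><br/>
- <code>private String pattern;</code><br/>
- <code>public void configure(JobConf job) {</code><br/>
- <code>  pattern = job.get("myapp.filter.pattern", ".*");  // default if unset</code><br/>
- <code>}</code>
- </p>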
- </section>
- <section>
- <title>Task Execution & Environment</title>
- <p>The <code>TaskTracker</code> executes the <code>Mapper</code>/
- <code>Reducer</code> <em>task</em> as a child process in a separate jvm.
- </p>
-
- <p>The child-task inherits the environment of the parent
- <code>TaskTracker</code>. The user can specify additional options to the
- child-jvm via the <code>mapred.child.java.opts</code> configuration
- parameter in the <code>JobConf</code> such as non-standard paths for the
- run-time linker to search shared libraries via
- <code>-Djava.library.path=<></code> etc. If the
- <code>mapred.child.java.opts</code> contains the symbol <em>@taskid@</em>
- it is interpolated with the value of <code>taskid</code> of the map/reduce
- task.</p>
-
- <p>Here is an example with multiple arguments and substitutions,
- showing jvm GC logging, and the start of a passwordless JVM JMX agent so that
- jconsole and the like can connect to it to watch child memory,
- threads and get thread dumps. It also sets the maximum heap-size of the
- child jvm to 512MB and adds an additional path to the
- <code>java.library.path</code> of the child-jvm.</p>
- <p>
- <code><property></code><br/>
- <code><name>mapred.child.java.opts</name></code><br/>
- <code><value></code><br/>
- <code>
- -Xmx512M -Djava.library.path=/home/mycompany/lib
- -verbose:gc -Xloggc:/tmp/@taskid@.gc</code><br/>
- <code>
- -Dcom.sun.management.jmxremote.authenticate=false
- -Dcom.sun.management.jmxremote.ssl=false</code><br/>
- <code></value></code><br/>
- <code></property></code>
- </p>
-
- <p>Users/admins can also specify the maximum virtual memory
- of the launched child-task using <code>mapred.child.ulimit</code>.</p>
-
- <p>When the job starts, the localized job directory
- <code> ${mapred.local.dir}/taskTracker/jobcache/$jobid/</code>
- has the following directories: </p>
- <ul>
- <li> A job-specific shared directory, created at location
- <code>${mapred.local.dir}/taskTracker/jobcache/$jobid/work/ </code>.
- This directory is exposed to the users through
- <code>job.local.dir</code>. Tasks can use this space as scratch
- space and share files among themselves. The directory can be accessed
- through the <a href="ext:api/org/apache/hadoop/mapred/jobconf/getjoblocaldir">
- JobConf.getJobLocalDir()</a> API. It is also available as a system
- property, so tasks can call <code>System.getProperty("job.local.dir")</code>.
- </li>
- <li>A jars directory, which contains the job jar file and the expanded jar </li>
- <li>A job.xml file, the generic job configuration </li>
- <li>Each task has a directory <code>task-id</code>, which in turn has the
- following structure
- <ul>
- <li>A job.xml file, the task-localized job configuration </li>
- <li>A directory for intermediate output files</li>
- <li>The working directory of the task, which in turn contains a
- temporary directory for creating temporary files</li>
- </ul>
- </li>
- </ul>
-
- <p>The <a href="#DistributedCache">DistributedCache</a> can also be used
- as a rudimentary software distribution mechanism for use in the map
- and/or reduce tasks. It can be used to distribute both jars and
- native libraries. The
- <a href="ext:api/org/apache/hadoop/filecache/distributedcache/addarchivetoclasspath">
- DistributedCache.addArchiveToClassPath(Path, Configuration)</a> or
- <a href="ext:api/org/apache/hadoop/filecache/distributedcache/addfiletoclasspath">
- DistributedCache.addFileToClassPath(Path, Configuration)</a> api can
- be used to cache files/jars and also add them to the <em>classpath</em>
- of child-jvm. Similarly the facility provided by the
- <code>DistributedCache</code> where-in it symlinks the cached files into
- the working directory of the task can be used to distribute native
- libraries and load them. The underlying detail is that child-jvm always
- has its <em>current working directory</em> added to the
- <code>java.library.path</code> and hence the cached libraries can be
- loaded via <a href="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/System.html#loadLibrary(java.lang.String)">
- System.loadLibrary</a> or <a href="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/System.html#load(java.lang.String)">
- System.load</a>.</p>
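- <p>As a sketch (all paths and names below are illustrative):</p>
- <p>
- <code>// add a cached jar to the classpath of the child-jvm</code><br/>
- <code>DistributedCache.addFileToClassPath(new Path("/myapp/lib/mylib.jar"), conf);</code><br/>
- <br/>
- <code>// cache a native library and symlink it into the task's working directory,</code><br/>
- <code>// which is already on java.library.path</code><br/>
- <code>DistributedCache.addCacheFile(</code><br/>
- <code>    new URI("hdfs://host:port/myapp/libmynative.so#libmynative.so"), conf);</code><br/>
- <code>DistributedCache.createSymlink(conf);</code><br/>
- <br/>
- <code>// later, inside the task</code><br/>
- <code>System.loadLibrary("mynative");</code>
- </p>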
- </section>
-
- <section>
- <title>Job Submission and Monitoring</title>
-
- <p><a href="ext:api/org/apache/hadoop/mapred/jobclient">
- JobClient</a> is the primary interface by which user-job interacts
- with the <code>JobTracker</code>.</p>
-
- <p><code>JobClient</code> provides facilities to submit jobs, track their
- progress, access component-tasks' reports/logs, get the Map-Reduce
- cluster's status information and so on.</p>
-
- <p>The job submission process involves:</p>
- <ol>
- <li>Checking the input and output specifications of the job.</li>
- <li>Computing the <code>InputSplit</code> values for the job.</li>
- <li>
- Setting up the requisite accounting information for the
- <code>DistributedCache</code> of the job, if necessary.
- </li>
- <li>
- Copying the job's jar and configuration to the map-reduce system
- directory on the <code>FileSystem</code>.
- </li>
- <li>
- Submitting the job to the <code>JobTracker</code> and optionally
- monitoring its status.
- </li>
- </ol>
- <p> Job history files are also logged to the user-specified directory
- <code>hadoop.job.history.user.location</code>,
- which defaults to the job output directory. The files are stored in
- "_logs/history/" in the specified directory. Hence, by default they
- will be in mapred.output.dir/_logs/history. Users can stop this
- logging by giving the value <code>none</code> for
- <code>hadoop.job.history.user.location</code>.</p>
- <p> Users can view a summary of the history logs in the specified directory
- using the following command: <br/>
- <code>$ bin/hadoop job -history output-dir</code><br/>
- This command will print job details, plus failed and killed tip
- details. <br/>
- More details about the job, such as successful tasks and the
- task attempts made for each task, can be viewed using the
- following command: <br/>
- <code>$ bin/hadoop job -history all output-dir</code><br/></p>
-
- <p> Users can use
- <a href="ext:api/org/apache/hadoop/mapred/outputlogfilter">OutputLogFilter</a>
- to filter log files from the output directory listing. </p>
-
- <p>Normally the user creates the application, describes various facets
- of the job via <code>JobConf</code>, and then uses the
- <code>JobClient</code> to submit the job and monitor its progress.</p>
- <section>
- <title>Job Control</title>
-
- <p>Users may need to chain map-reduce jobs to accomplish complex
- tasks which cannot be done via a single map-reduce job. This is fairly
- easy since the output of the job typically goes to the distributed
- file-system, and the output, in turn, can be used as the input for the
- next job.</p>
-
- <p>However, this also means that the onus of ensuring jobs are
- complete (success/failure) lies squarely on the clients. In such
- cases, the various job-control options are:</p>
- <ul>
- <li>
- <a href="ext:api/org/apache/hadoop/mapred/jobclient/runjob">
- runJob(JobConf)</a> : Submits the job and returns only after the
- job has completed.
- </li>
- <li>
- <a href="ext:api/org/apache/hadoop/mapred/jobclient/submitjob">
- submitJob(JobConf)</a> : Only submits the job; the client then polls the
- returned handle to the
- <a href="ext:api/org/apache/hadoop/mapred/runningjob">
- RunningJob</a> to query status and make scheduling decisions
- (see the sketch after this list).
- </li>
- <li>
- <a href="ext:api/org/apache/hadoop/mapred/jobconf/setjobendnotificationuri">
- JobConf.setJobEndNotificationURI(String)</a> : Sets up a
- notification upon job-completion, thus avoiding polling.
- </li>
- </ul>
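- <p>The following sketch chains two jobs, polling the second one via its
- <code>RunningJob</code> handle; the <code>JobConf</code> objects and paths
- are illustrative:</p>
- <p>
- <code>FileOutputFormat.setOutputPath(job1, new Path("/user/joe/step1"));</code><br/>
- <code>JobClient.runJob(job1);  // blocks until job1 completes</code><br/>
- <br/>
- <code>FileInputFormat.setInputPaths(job2, new Path("/user/joe/step1"));</code><br/>
- <code>FileOutputFormat.setOutputPath(job2, new Path("/user/joe/step2"));</code><br/>
- <code>JobClient jc = new JobClient(job2);</code><br/>
- <code>RunningJob running = jc.submitJob(job2);  // returns immediately</code><br/>
- <code>while (!running.isComplete()) {  // poll for completion</code><br/>
- <code>  Thread.sleep(5000);</code><br/>
- <code>}</code><br/>
- <code>if (!running.isSuccessful()) {</code><br/>
- <code>  // handle the failure of job2</code><br/>
- <code>}</code>
- </p>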
- </section>
- </section>
- <section>
- <title>Job Input</title>
-
- <p><a href="ext:api/org/apache/hadoop/mapred/inputformat">
- InputFormat</a> describes the input-specification for a Map-Reduce job.
- </p>
-
- <p>The Map-Reduce framework relies on the <code>InputFormat</code> of
- the job to:</p>
- <ol>
- <li>Validate the input-specification of the job.</li>
- <li>
- Split-up the input file(s) into logical <code>InputSplit</code>
- instances, each of which is then assigned to an individual
- <code>Mapper</code>.
- </li>
- <li>
- Provide the <code>RecordReader</code> implementation used to
- glean input records from the logical <code>InputSplit</code> for
- processing by the <code>Mapper</code>.
- </li>
- </ol>
-
- <p>The default behavior of file-based <code>InputFormat</code>
- implementations, typically sub-classes of
- <a href="ext:api/org/apache/hadoop/mapred/fileinputformat">
- FileInputFormat</a>, is to split the input into <em>logical</em>
- <code>InputSplit</code> instances based on the total size, in bytes, of
- the input files. However, the <code>FileSystem</code> blocksize of the
- input files is treated as an upper bound for input splits. A lower bound
- on the split size can be set via <code>mapred.min.split.size</code>.</p>
-
- <p>Clearly, logical splits based on input-size are insufficient for many
- applications since record boundaries must be respected. In such cases,
- the application should implement a <code>RecordReader</code>, which is
- responsible for respecting record-boundaries and presenting a
- record-oriented view of the logical <code>InputSplit</code> to the
- individual task.</p>
- <p><a href="ext:api/org/apache/hadoop/mapred/textinputformat">
- TextInputFormat</a> is the default <code>InputFormat</code>.</p>
-
- <p>If <code>TextInputFormat</code> is the <code>InputFormat</code> for a
- given job, the framework detects input-files with the <em>.gz</em> and
- <em>.lzo</em> extensions and automatically decompresses them using the
- appropriate <code>CompressionCodec</code>. However, it must be noted that
- compressed files with the above extensions cannot be <em>split</em> and
- each compressed file is processed in its entirety by a single mapper.</p>
-
- <section>
- <title>InputSplit</title>
-
- <p><a href="ext:api/org/apache/hadoop/mapred/inputsplit">
- InputSplit</a> represents the data to be processed by an individual
- <code>Mapper</code>.</p>
- <p>Typically <code>InputSplit</code> presents a byte-oriented view of
- the input, and it is the responsibility of <code>RecordReader</code>
- to process and present a record-oriented view.</p>
-
- <p><a href="ext:api/org/apache/hadoop/mapred/filesplit">
- FileSplit</a> is the default <code>InputSplit</code>. It sets
- <code>map.input.file</code> to the path of the input file for the
- logical split.</p>
- </section>
-
- <section>
- <title>RecordReader</title>
-
- <p><a href="ext:api/org/apache/hadoop/mapred/recordreader">
- RecordReader</a> reads <code><key, value></code> pairs from an
- <code>InputSplit</code>.</p>
- <p>Typically the <code>RecordReader</code> converts the byte-oriented
- view of the input, provided by the <code>InputSplit</code>, and
- presents a record-oriented view to the <code>Mapper</code> implementations
- for processing. <code>RecordReader</code> thus assumes the
- responsibility of processing record boundaries and presents the tasks
- with keys and values.</p>
- </section>
- </section>
- <section>
- <title>Job Output</title>
-
- <p><a href="ext:api/org/apache/hadoop/mapred/outputformat">
- OutputFormat</a> describes the output-specification for a Map-Reduce
- job.</p>
- <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of
- the job to:</p>
- <ol>
- <li>
- Validate the output-specification of the job; for example, check that
- the output directory doesn't already exist.
- </li>
- <li>
- Provide the <code>RecordWriter</code> implementation used to
- write the output files of the job. Output files are stored in a
- <code>FileSystem</code>.
- </li>
- </ol>
-
- <p><code>TextOutputFormat</code> is the default
- <code>OutputFormat</code>.</p>
-
- <section>
- <title>Task Side-Effect Files</title>
-
- <p>In some applications, component tasks need to create and/or write to
- side-files, which differ from the actual job-output files.</p>
-
- <p>In such cases there could be issues with two instances of the same
- <code>Mapper</code> or <code>Reducer</code> running simultaneously (for
- example, speculative tasks) trying to open and/or write to the same
- file (path) on the <code>FileSystem</code>. Hence the
- application-writer will have to pick unique names per task-attempt
- (using the attemptid, say <code>attempt_200709221812_0001_m_000000_0</code>),
- not just per task.</p>
-
- <p>To avoid these issues the Map-Reduce framework maintains a special
- <code>${mapred.output.dir}/_temporary/_${taskid}</code> sub-directory
- accessible via <code>${mapred.work.output.dir}</code>
- for each task-attempt on the <code>FileSystem</code> where the output
- of the task-attempt is stored. On successful completion of the
- task-attempt, the files in the
- <code>${mapred.output.dir}/_temporary/_${taskid}</code> (only)
- are <em>promoted</em> to <code>${mapred.output.dir}</code>. Of course,
- the framework discards the sub-directory of unsuccessful task-attempts.
- This process is completely transparent to the application.</p>
-
- <p>The application-writer can take advantage of this feature by
- creating any side-files required in <code>${mapred.work.output.dir}</code>
- during execution of a task via
- <a href="ext:api/org/apache/hadoop/mapred/fileoutputformat/getworkoutputpath">
- FileOutputFormat.getWorkOutputPath()</a>, and the framework will promote them
- similarly for successful task-attempts, thus eliminating the need to
- pick unique paths per task-attempt.</p>
-
- <p>Note: The value of <code>${mapred.work.output.dir}</code> during
- execution of a particular task-attempt is actually
- <code>${mapred.output.dir}/_temporary/_${taskid}</code>, and this value is
- set by the map-reduce framework. So, just create any side-files in the
- path returned by
- <a href="ext:api/org/apache/hadoop/mapred/fileoutputformat/getworkoutputpath">
- FileOutputFormat.getWorkOutputPath() </a>from map/reduce
- task to take advantage of this feature.</p>
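- <p>A hedged sketch of creating such a side-file from within a task, assuming
- <code>job</code> is the task's <code>JobConf</code> (obtained via
- <code>configure(JobConf)</code>); the file name is illustrative:</p>
- <p>
- <code>Path workDir = FileOutputFormat.getWorkOutputPath(job);  // ${mapred.work.output.dir}</code><br/>
- <code>Path sideFile = new Path(workDir, "side-" + job.get("mapred.task.id"));</code><br/>
- <code>FSDataOutputStream out = FileSystem.get(job).create(sideFile);</code><br/>
- <code>out.writeBytes("auxiliary output\n");</code><br/>
- <code>out.close();</code>
- </p>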
-
- <p>The entire discussion holds true for maps of jobs with
- reducer=NONE (i.e. 0 reduces) since output of the map, in that case,
- goes directly to HDFS.</p>
- </section>
-
- <section>
- <title>RecordWriter</title>
-
- <p><a href="ext:api/org/apache/hadoop/mapred/recordwriter">
- RecordWriter</a> writes the output <code><key, value></code>
- pairs to an output file.</p>
- <p>RecordWriter implementations write the job outputs to the
- <code>FileSystem</code>.</p>
- </section>
- </section>
-
- <section>
- <title>Other Useful Features</title>
-
- <section>
- <title>Counters</title>
-
- <p><code>Counters</code> represent global counters, defined either by
- the Map-Reduce framework or applications. Each <code>Counter</code> can
- be of any <code>Enum</code> type. Counters of a particular
- <code>Enum</code> are bunched into groups of type
- <code>Counters.Group</code>.</p>
-
- <p>Applications can define arbitrary <code>Counters</code> (of type
- <code>Enum</code>) and update them via
- <a href="ext:api/org/apache/hadoop/mapred/reporter/incrcounter">
- Reporter.incrCounter(Enum, long)</a> in the <code>map</code> and/or
- <code>reduce</code> methods. These counters are then globally
- aggregated by the framework.</p>
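- <p>For illustration, a sketch of a hypothetical application-defined counter
- being updated from a <code>map</code> method:</p>
- <p>
- <code>// declared inside the Mapper class</code><br/>
- <code>static enum RecordCounters { MALFORMED, PROCESSED }</code><br/>
- <br/>
- <code>public void map(LongWritable key, Text value,</code><br/>
- <code>                OutputCollector<Text, IntWritable> output,</code><br/>
- <code>                Reporter reporter) throws IOException {</code><br/>
- <code>  if (value.getLength() == 0) {</code><br/>
- <code>    reporter.incrCounter(RecordCounters.MALFORMED, 1);</code><br/>
- <code>    return;</code><br/>
- <code>  }</code><br/>
- <code>  reporter.incrCounter(RecordCounters.PROCESSED, 1);</code><br/>
- <code>  // ... normal processing ...</code><br/>
- <code>}</code>
- </p>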
- </section>
-
- <section>
- <title>DistributedCache</title>
-
- <p><a href="ext:api/org/apache/hadoop/filecache/distributedcache">
- DistributedCache</a> distributes application-specific, large, read-only
- files efficiently.</p>
-
- <p><code>DistributedCache</code> is a facility provided by the
- Map-Reduce framework to cache files (text, archives, jars and so on)
- needed by applications.</p>
-
- <p>Applications specify the files to be cached via urls (hdfs:// or
- http://) in the <code>JobConf</code>. The <code>DistributedCache</code>
- assumes that the files specified via hdfs:// urls are already present
- on the <code>FileSystem</code>.</p>
- <p>The framework will copy the necessary files to the slave node
- before any tasks for the job are executed on that node. Its
- efficiency stems from the fact that the files are only copied once
- per job and the ability to cache archives which are un-archived on
- the slaves.</p>
-
- <p><code>DistributedCache</code> tracks the modification timestamps of
- the cached files. Clearly the cache files should not be modified by
- the application or externally while the job is executing.</p>
- <p><code>DistributedCache</code> can be used to distribute simple,
- read-only data/text files and more complex types such as archives and
- jars. Archives (zip, tar, tgz and tar.gz files) are
- <em>un-archived</em> at the slave nodes.
- Optionally users can also direct the <code>DistributedCache</code> to
- <em>symlink</em> the cached file(s) into the <code>current working
- directory</code> of the task via the
- <a href="ext:api/org/apache/hadoop/filecache/distributedcache/createsymlink">
- DistributedCache.createSymlink(Configuration)</a> api. Files
- have <em>execution permissions</em> set.</p>
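-
- <p>For example, an application might cache a file and an archive that are
- already present on HDFS as follows (a minimal sketch; the host, port and
- paths are illustrative):<br/>
- <code>DistributedCache.addCacheFile(new URI("hdfs://host:port/user/joe/lookup.dat#lookup.dat"), job);</code><br/>
- <code>DistributedCache.addCacheArchive(new URI("hdfs://host:port/user/joe/extra.zip"), job);</code><br/>
- <code>// optionally symlink the cached files into the task's working directory</code><br/>
- <code>DistributedCache.createSymlink(job);</code>
- </p>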
- </section>
-
- <section>
- <title>Tool</title>
-
- <p>The <a href="ext:api/org/apache/hadoop/util/tool">Tool</a>
- interface supports the handling of generic Hadoop command-line options.
- </p>
-
- <p><code>Tool</code> is the standard for any Map-Reduce tool or
- application. The application should delegate the handling of
- standard command-line options to
- <a href="ext:api/org/apache/hadoop/util/genericoptionsparser">
- GenericOptionsParser</a> via
- <a href="ext:api/org/apache/hadoop/util/toolrunner/run">
- ToolRunner.run(Tool, String[])</a> and only handle its custom
- arguments.</p>
-
- <p>
- The generic Hadoop command-line options are:<br/>
- <code>
- -conf <configuration file>
- </code>
- <br/>
- <code>
- -D <property=value>
- </code>
- <br/>
- <code>
- -fs <local|namenode:port>
- </code>
- <br/>
- <code>
- -jt <local|jobtracker:port>
- </code>
- </p>
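-
- <p>A minimal sketch of a <code>Tool</code> implementation follows; the
- class name <code>MyJob</code> is illustrative, and the complete
- <code>WordCount</code> v2.0 example later in this document shows the same
- pattern in full:<br/>
- <code>public class MyJob extends Configured implements Tool {</code><br/>
- <code>  public int run(String[] args) throws Exception {</code><br/>
- <code>    JobConf conf = new JobConf(getConf(), MyJob.class);</code><br/>
- <code>    // ... handle custom arguments and configure the job ...</code><br/>
- <code>    JobClient.runJob(conf);</code><br/>
- <code>    return 0;</code><br/>
- <code>  }</code><br/>
- <code>  public static void main(String[] args) throws Exception {</code><br/>
- <code>    System.exit(ToolRunner.run(new Configuration(), new MyJob(), args));</code><br/>
- <code>  }</code><br/>
- <code>}</code>
- </p>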
- </section>
-
- <section>
- <title>IsolationRunner</title>
-
- <p><a href="ext:api/org/apache/hadoop/mapred/isolationrunner">
- IsolationRunner</a> is a utility to help debug Map-Reduce programs.</p>
-
- <p>To use the <code>IsolationRunner</code>, first set
- <code>keep.failed.tasks.files</code> to <code>true</code>
- (also see <code>keep.tasks.files.pattern</code>).</p>
-
- <p>
- Next, go to the node on which the failed task ran, change to the
- <code>TaskTracker</code>'s local directory and run the
- <code>IsolationRunner</code>:<br/>
- <code>$ cd <local path>/taskTracker/${taskid}/work</code><br/>
- <code>
- $ bin/hadoop org.apache.hadoop.mapred.IsolationRunner ../job.xml
- </code>
- </p>
-
- <p><code>IsolationRunner</code> will run the failed task in a single
- jvm, which can be in the debugger, over precisely the same input.</p>
- </section>
-
- <section>
- <title>Debugging</title>
- <p>The Map/Reduce framework provides a facility to run user-provided
- scripts for debugging. When a map/reduce task fails, the user can run
- a script to post-process the task logs, i.e. the task's stdout,
- stderr, syslog and jobconf. The stdout and stderr of the
- user-provided debug script are printed on the diagnostics.
- These outputs are also displayed on the job UI on demand.</p>
- <p>In the following sections we discuss how to submit a debug script
- along with the job. To submit a debug script, it first has to be
- distributed; then the script has to be supplied in the configuration.</p>
- <section>
- <title>How to distribute the script file</title>
- <p>
- To distribute the debug script file, first copy the file to the dfs.
- The file can be distributed by setting the property
- "mapred.cache.files" with value "path"#"script-name".
- If more than one file has to be distributed, the files can be added
- as comma separated paths. This property can also be set by APIs
- <a href="ext:api/org/apache/hadoop/filecache/distributedcache/addcachefile">
- DistributedCache.addCacheFile(URI,conf) </a> and
- <a href="ext:api/org/apache/hadoop/filecache/distributedcache/setcachefiles">
- DistributedCache.setCacheFiles(URIs,conf) </a> where URI is of
- the form "hdfs://host:port/'absolutepath'#'script-name'".
- For Streaming, the file can be added through
- command line option -cacheFile.
- </p>
-
- <p>
- The file has to be symlinked in the current working directory of
- the task. To create a symlink for the file, the property
- "mapred.create.symlink" is set to "yes". This can also be set via the
- <a href="ext:api/org/apache/hadoop/filecache/distributedcache/createsymlink">
- DistributedCache.createSymlink(Configuration)</a> api.
- </p>
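-
- <p>For example (a minimal sketch; the HDFS path and script name are
- illustrative):<br/>
- <code>conf.set("mapred.cache.files", "hdfs://host:port/user/joe/debug/myscript.sh#myscript.sh");</code><br/>
- <code>conf.set("mapred.create.symlink", "yes");</code>
- </p>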
- </section>
- <section>
- <title>How to submit the script</title>
- <p>A quick way to submit a debug script is to set values for the
- properties "mapred.map.task.debug.script" and
- "mapred.reduce.task.debug.script" for debugging the map and reduce
- tasks respectively. These properties can also be set by using the APIs
- <a href="ext:api/org/apache/hadoop/mapred/jobconf/setmapdebugscript">
- JobConf.setMapDebugScript(String)</a> and
- <a href="ext:api/org/apache/hadoop/mapred/jobconf/setreducedebugscript">
- JobConf.setReduceDebugScript(String)</a>. For streaming, a debug
- script can be submitted with the command-line options -mapdebug and
- -reducedebug for debugging the mapper and reducer respectively.</p>
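- <p>For example (a minimal sketch; <code>myscript.sh</code> is the
- symlinked script name from the previous section):<br/>
- <code>conf.setMapDebugScript("./myscript.sh");</code><br/>
- <code>conf.setReduceDebugScript("./myscript.sh");</code>
- </p>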
-
- <p>The arguments to the script are the task's stdout, stderr,
- syslog and jobconf files. The debug command, run on the node where
- the map/reduce task failed, is: <br/>
- <code> $script $stdout $stderr $syslog $jobconf </code> </p>
- <p>Pipes programs have the C++ program name as a fifth argument
- to the command. Thus for pipes programs the command is <br/>
- <code>$script $stdout $stderr $syslog $jobconf $program </code>
- </p>
- </section>
-
- <section>
- <title>Default Behavior</title>
- <p>For pipes, a default script is run which processes core dumps under
- gdb, prints the stack trace and gives information about the running threads.</p>
- </section>
- </section>
-
- <section>
- <title>JobControl</title>
-
- <p><a href="ext:api/org/apache/hadoop/mapred/jobcontrol/package-summary">
- JobControl</a> is a utility which encapsulates a set of Map-Reduce jobs
- and their dependencies.</p>
- </section>
-
- <section>
- <title>Data Compression</title>
-
- <p>Hadoop Map-Reduce provides facilities for the application-writer to
- specify compression for both intermediate map-outputs and the
- job-outputs i.e. output of the reduces. It also comes bundled with
- <a href="ext:api/org/apache/hadoop/io/compress/compressioncodec">
- CompressionCodec</a> implementations for the
- <a href="ext:zlib">zlib</a> and <a href="ext:lzo">lzo</a> compression
- algorithms. The <a href="ext:gzip">gzip</a> file format is also
- supported.</p>
-
- <p>Hadoop also provides native implementations of the above compression
- codecs for reasons of both performance (zlib) and non-availability of
- Java libraries (lzo). More details on their usage and availability are
- available <a href="native_libraries.html">here</a>.</p>
-
- <section>
- <title>Intermediate Outputs</title>
-
- <p>Applications can control compression of intermediate map-outputs
- via the
- <a href="ext:api/org/apache/hadoop/mapred/jobconf/setcompressmapoutput">
- JobConf.setCompressMapOutput(boolean)</a> api and the
- <code>CompressionCodec</code> to be used via the
- <a href="ext:api/org/apache/hadoop/mapred/jobconf/setmapoutputcompressorclass">
- JobConf.setMapOutputCompressorClass(Class)</a> api. Since
- the intermediate map-outputs are always stored in the
- <a href="ext:api/org/apache/hadoop/io/sequencefile">SequenceFile</a>
- format, the
- <a href="ext:api/org/apache/hadoop/io/sequencefilecompressiontype">
- SequenceFile.CompressionType</a> (i.e.
- <a href="ext:api/org/apache/hadoop/io/sequencefilecompressiontype/record">
- RECORD</a> /
- <a href="ext:api/org/apache/hadoop/io/sequencefilecompressiontype/block">
- BLOCK</a> - defaults to <code>RECORD</code>) can be specified via the
- <a href="ext:api/org/apache/hadoop/mapred/jobconf/setmapoutputcompressiontype">
- JobConf.setMapOutputCompressionType(SequenceFile.CompressionType)</a>
- api.</p>
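-
- <p>For example (a minimal sketch; <code>DefaultCodec</code>, the bundled
- zlib-based codec, is used for illustration):<br/>
- <code>conf.setCompressMapOutput(true);</code><br/>
- <code>conf.setMapOutputCompressorClass(DefaultCodec.class);</code><br/>
- <code>conf.setMapOutputCompressionType(SequenceFile.CompressionType.BLOCK);</code>
- </p>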
- </section>
-
- <section>
- <title>Job Outputs</title>
-
- <p>Applications can control compression of job-outputs via the
- <a href="ext:api/org/apache/hadoop/mapred/outputformatbase/setcompressoutput">
- OutputFormatBase.setCompressOutput(JobConf, boolean)</a> api and the
- <code>CompressionCodec</code> to be used can be specified via the
- <a href="ext:api/org/apache/hadoop/mapred/outputformatbase/setoutputcompressorclass">
- OutputFormatBase.setOutputCompressorClass(JobConf, Class)</a> api.</p>
-
- <p>If the job outputs are to be stored in the
- <a href="ext:api/org/apache/hadoop/mapred/sequencefileoutputformat">
- SequenceFileOutputFormat</a>, the required
- <code>SequenceFile.CompressionType</code> (i.e. <code>RECORD</code> /
- <code>BLOCK</code> - defaults to <code>RECORD</code>) can be specified
- via the
- <a href="ext:api/org/apache/hadoop/mapred/sequencefileoutputformat/setoutputcompressiontype">
- SequenceFileOutputFormat.setOutputCompressionType(JobConf,
- SequenceFile.CompressionType)</a> api.</p>
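-
- <p>For example (a minimal sketch; <code>GzipCodec</code> is used for
- illustration, and the last call applies only when the job uses
- <code>SequenceFileOutputFormat</code>):<br/>
- <code>OutputFormatBase.setCompressOutput(conf, true);</code><br/>
- <code>OutputFormatBase.setOutputCompressorClass(conf, GzipCodec.class);</code><br/>
- <code>SequenceFileOutputFormat.setOutputCompressionType(conf, SequenceFile.CompressionType.BLOCK);</code>
- </p>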
- </section>
- </section>
-
- </section>
- </section>
- <section>
- <title>Example: WordCount v2.0</title>
-
- <p>Here is a more complete <code>WordCount</code> which uses many of the
- features provided by the Map-Reduce framework we discussed so far.</p>
-
- <p>This needs the HDFS to be up and running, especially for the
- <code>DistributedCache</code>-related features. Hence it only works with a
- <a href="quickstart.html#SingleNodeSetup">pseudo-distributed</a> or
- <a href="quickstart.html#Fully-Distributed+Operation">fully-distributed</a>
- Hadoop installation.</p>
-
- <section>
- <title>Source Code</title>
-
- <table>
- <tr>
- <th></th>
- <th>WordCount.java</th>
- </tr>
- <tr>
- <td>1.</td>
- <td>
- <code>package org.myorg;</code>
- </td>
- </tr>
- <tr>
- <td>2.</td>
- <td></td>
- </tr>
- <tr>
- <td>3.</td>
- <td>
- <code>import java.io.*;</code>
- </td>
- </tr>
- <tr>
- <td>4.</td>
- <td>
- <code>import java.util.*;</code>
- </td>
- </tr>
- <tr>
- <td>5.</td>
- <td></td>
- </tr>
- <tr>
- <td>6.</td>
- <td>
- <code>import org.apache.hadoop.fs.Path;</code>
- </td>
- </tr>
- <tr>
- <td>7.</td>
- <td>
- <code>import org.apache.hadoop.filecache.DistributedCache;</code>
- </td>
- </tr>
- <tr>
- <td>8.</td>
- <td>
- <code>import org.apache.hadoop.conf.*;</code>
- </td>
- </tr>
- <tr>
- <td>9.</td>
- <td>
- <code>import org.apache.hadoop.io.*;</code>
- </td>
- </tr>
- <tr>
- <td>10.</td>
- <td>
- <code>import org.apache.hadoop.mapred.*;</code>
- </td>
- </tr>
- <tr>
- <td>11.</td>
- <td>
- <code>import org.apache.hadoop.util.*;</code>
- </td>
- </tr>
- <tr>
- <td>12.</td>
- <td></td>
- </tr>
- <tr>
- <td>13.</td>
- <td>
- <code>public class WordCount extends Configured implements Tool {</code>
- </td>
- </tr>
- <tr>
- <td>14.</td>
- <td></td>
- </tr>
- <tr>
- <td>15.</td>
- <td>
-
- <code>
- public static class Map extends MapReduceBase
- implements Mapper<LongWritable, Text, Text, IntWritable> {
- </code>
- </td>
- </tr>
- <tr>
- <td>16.</td>
- <td></td>
- </tr>
- <tr>
- <td>17.</td>
- <td>
-
- <code>
- static enum Counters { INPUT_WORDS }
- </code>
- </td>
- </tr>
- <tr>
- <td>18.</td>
- <td></td>
- </tr>
- <tr>
- <td>19.</td>
- <td>
-
- <code>
- private final static IntWritable one = new IntWritable(1);
- </code>
- </td>
- </tr>
- <tr>
- <td>20.</td>
- <td>
-
- <code>private Text word = new Text();</code>
- </td>
- </tr>
- <tr>
- <td>21.</td>
- <td></td>
- </tr>
- <tr>
- <td>22.</td>
- <td>
-
- <code>private boolean caseSensitive = true;</code>
- </td>
- </tr>
- <tr>
- <td>23.</td>
- <td>
-
- <code>private Set<String> patternsToSkip = new HashSet<String>();</code>
- </td>
- </tr>
- <tr>
- <td>24.</td>
- <td></td>
- </tr>
- <tr>
- <td>25.</td>
- <td>
-
- <code>private long numRecords = 0;</code>
- </td>
- </tr>
- <tr>
- <td>26.</td>
- <td>
-
- <code>private String inputFile;</code>
- </td>
- </tr>
- <tr>
- <td>27.</td>
- <td></td>
- </tr>
- <tr>
- <td>28.</td>
- <td>
-
- <code>public void configure(JobConf job) {</code>
- </td>
- </tr>
- <tr>
- <td>29.</td>
- <td>
-
- <code>
- caseSensitive = job.getBoolean("wordcount.case.sensitive", true);
- </code>
- </td>
- </tr>
- <tr>
- <td>30.</td>
- <td>
-
- <code>inputFile = job.get("map.input.file");</code>
- </td>
- </tr>
- <tr>
- <td>31.</td>
- <td></td>
- </tr>
- <tr>
- <td>32.</td>
- <td>
-
- <code>if (job.getBoolean("wordcount.skip.patterns", false)) {</code>
- </td>
- </tr>
- <tr>
- <td>33.</td>
- <td>
-
- <code>Path[] patternsFiles = new Path[0];</code>
- </td>
- </tr>
- <tr>
- <td>34.</td>
- <td>
-
- <code>try {</code>
- </td>
- </tr>
- <tr>
- <td>35.</td>
- <td>
-
- <code>
- patternsFiles = DistributedCache.getLocalCacheFiles(job);
- </code>
- </td>
- </tr>
- <tr>
- <td>36.</td>
- <td>
-
- <code>} catch (IOException ioe) {</code>
- </td>
- </tr>
- <tr>
- <td>37.</td>
- <td>
-
- <code>
- System.err.println("Caught exception while getting cached files: "
- + StringUtils.stringifyException(ioe));
- </code>
- </td>
- </tr>
- <tr>
- <td>38.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>39.</td>
- <td>
-
- <code>for (Path patternsFile : patternsFiles) {</code>
- </td>
- </tr>
- <tr>
- <td>40.</td>
- <td>
-
- <code>parseSkipFile(patternsFile);</code>
- </td>
- </tr>
- <tr>
- <td>41.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>42.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>43.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>44.</td>
- <td></td>
- </tr>
- <tr>
- <td>45.</td>
- <td>
-
- <code>private void parseSkipFile(Path patternsFile) {</code>
- </td>
- </tr>
- <tr>
- <td>46.</td>
- <td>
-
- <code>try {</code>
- </td>
- </tr>
- <tr>
- <td>47.</td>
- <td>
-
- <code>
- BufferedReader fis =
- new BufferedReader(new FileReader(patternsFile.toString()));
- </code>
- </td>
- </tr>
- <tr>
- <td>48.</td>
- <td>
-
- <code>String pattern = null;</code>
- </td>
- </tr>
- <tr>
- <td>49.</td>
- <td>
-
- <code>while ((pattern = fis.readLine()) != null) {</code>
- </td>
- </tr>
- <tr>
- <td>50.</td>
- <td>
-
- <code>patternsToSkip.add(pattern);</code>
- </td>
- </tr>
- <tr>
- <td>51.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>52.</td>
- <td>
-
- <code>} catch (IOException ioe) {</code>
- </td>
- </tr>
- <tr>
- <td>53.</td>
- <td>
-
- <code>
- System.err.println("Caught exception while parsing the cached file '" +
- patternsFile + "' : " +
- StringUtils.stringifyException(ioe));
-
- </code>
- </td>
- </tr>
- <tr>
- <td>54.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>55.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>56.</td>
- <td></td>
- </tr>
- <tr>
- <td>57.</td>
- <td>
-
- <code>
- public void map(LongWritable key, Text value,
- OutputCollector<Text, IntWritable> output,
- Reporter reporter) throws IOException {
- </code>
- </td>
- </tr>
- <tr>
- <td>58.</td>
- <td>
-
- <code>
- String line =
- (caseSensitive) ? value.toString() :
- value.toString().toLowerCase();
- </code>
- </td>
- </tr>
- <tr>
- <td>59.</td>
- <td></td>
- </tr>
- <tr>
- <td>60.</td>
- <td>
-
- <code>for (String pattern : patternsToSkip) {</code>
- </td>
- </tr>
- <tr>
- <td>61.</td>
- <td>
-
- <code>line = line.replaceAll(pattern, "");</code>
- </td>
- </tr>
- <tr>
- <td>62.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>63.</td>
- <td></td>
- </tr>
- <tr>
- <td>64.</td>
- <td>
-
- <code>StringTokenizer tokenizer = new StringTokenizer(line);</code>
- </td>
- </tr>
- <tr>
- <td>65.</td>
- <td>
-
- <code>while (tokenizer.hasMoreTokens()) {</code>
- </td>
- </tr>
- <tr>
- <td>66.</td>
- <td>
-
- <code>word.set(tokenizer.nextToken());</code>
- </td>
- </tr>
- <tr>
- <td>67.</td>
- <td>
-
- <code>output.collect(word, one);</code>
- </td>
- </tr>
- <tr>
- <td>68.</td>
- <td>
-
- <code>reporter.incrCounter(Counters.INPUT_WORDS, 1);</code>
- </td>
- </tr>
- <tr>
- <td>69.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>70.</td>
- <td></td>
- </tr>
- <tr>
- <td>71.</td>
- <td>
-
- <code>if ((++numRecords % 100) == 0) {</code>
- </td>
- </tr>
- <tr>
- <td>72.</td>
- <td>
-
- <code>
- reporter.setStatus("Finished processing " + numRecords +
- " records " + "from the input file: " +
- inputFile);
- </code>
- </td>
- </tr>
- <tr>
- <td>73.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>74.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>75.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>76.</td>
- <td></td>
- </tr>
- <tr>
- <td>77.</td>
- <td>
-
- <code>
- public static class Reduce extends MapReduceBase implements
- Reducer<Text, IntWritable, Text, IntWritable> {
- </code>
- </td>
- </tr>
- <tr>
- <td>78.</td>
- <td>
-
- <code>
- public void reduce(Text key, Iterator<IntWritable> values,
- OutputCollector<Text, IntWritable> output,
- Reporter reporter) throws IOException {
- </code>
- </td>
- </tr>
- <tr>
- <td>79.</td>
- <td>
-
- <code>int sum = 0;</code>
- </td>
- </tr>
- <tr>
- <td>80.</td>
- <td>
-
- <code>while (values.hasNext()) {</code>
- </td>
- </tr>
- <tr>
- <td>81.</td>
- <td>
-
- <code>sum += values.next().get();</code>
- </td>
- </tr>
- <tr>
- <td>82.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>83.</td>
- <td>
-
- <code>output.collect(key, new IntWritable(sum));</code>
- </td>
- </tr>
- <tr>
- <td>84.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>85.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>86.</td>
- <td></td>
- </tr>
- <tr>
- <td>87.</td>
- <td>
-
- <code>public int run(String[] args) throws Exception {</code>
- </td>
- </tr>
- <tr>
- <td>88.</td>
- <td>
-
- <code>
- JobConf conf = new JobConf(getConf(), WordCount.class);
- </code>
- </td>
- </tr>
- <tr>
- <td>89.</td>
- <td>
-
- <code>conf.setJobName("wordcount");</code>
- </td>
- </tr>
- <tr>
- <td>90.</td>
- <td></td>
- </tr>
- <tr>
- <td>91.</td>
- <td>
-
- <code>conf.setOutputKeyClass(Text.class);</code>
- </td>
- </tr>
- <tr>
- <td>92.</td>
- <td>
-
- <code>conf.setOutputValueClass(IntWritable.class);</code>
- </td>
- </tr>
- <tr>
- <td>93.</td>
- <td></td>
- </tr>
- <tr>
- <td>94.</td>
- <td>
-
- <code>conf.setMapperClass(Map.class);</code>
- </td>
- </tr>
- <tr>
- <td>95.</td>
- <td>
-
- <code>conf.setCombinerClass(Reduce.class);</code>
- </td>
- </tr>
- <tr>
- <td>96.</td>
- <td>
-
- <code>conf.setReducerClass(Reduce.class);</code>
- </td>
- </tr>
- <tr>
- <td>97.</td>
- <td></td>
- </tr>
- <tr>
- <td>98.</td>
- <td>
-
- <code>conf.setInputFormat(TextInputFormat.class);</code>
- </td>
- </tr>
- <tr>
- <td>99.</td>
- <td>
-
- <code>conf.setOutputFormat(TextOutputFormat.class);</code>
- </td>
- </tr>
- <tr>
- <td>100.</td>
- <td></td>
- </tr>
- <tr>
- <td>101.</td>
- <td>
-
- <code>
- List<String> other_args = new ArrayList<String>();
- </code>
- </td>
- </tr>
- <tr>
- <td>102.</td>
- <td>
-
- <code>for (int i=0; i < args.length; ++i) {</code>
- </td>
- </tr>
- <tr>
- <td>103.</td>
- <td>
-
- <code>if ("-skip".equals(args[i])) {</code>
- </td>
- </tr>
- <tr>
- <td>104.</td>
- <td>
-
- <code>
- DistributedCache.addCacheFile(new Path(args[++i]).toUri(), conf);
- </code>
- </td>
- </tr>
- <tr>
- <td>105.</td>
- <td>
-
- <code>
- conf.setBoolean("wordcount.skip.patterns", true);
- </code>
- </td>
- </tr>
- <tr>
- <td>106.</td>
- <td>
-
- <code>} else {</code>
- </td>
- </tr>
- <tr>
- <td>107.</td>
- <td>
-
- <code>other_args.add(args[i]);</code>
- </td>
- </tr>
- <tr>
- <td>108.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>109.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>110.</td>
- <td></td>
- </tr>
- <tr>
- <td>111.</td>
- <td>
-
- <code>FileInputFormat.setInputPaths(conf, new Path(other_args.get(0)));</code>
- </td>
- </tr>
- <tr>
- <td>112.</td>
- <td>
-
- <code>FileOutputFormat.setOutputPath(conf, new Path(other_args.get(1)));</code>
- </td>
- </tr>
- <tr>
- <td>113.</td>
- <td></td>
- </tr>
- <tr>
- <td>114.</td>
- <td>
-
- <code>JobClient.runJob(conf);</code>
- </td>
- </tr>
- <tr>
- <td>115.</td>
- <td>
-
- <code>return 0;</code>
- </td>
- </tr>
- <tr>
- <td>116.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>117.</td>
- <td></td>
- </tr>
- <tr>
- <td>118.</td>
- <td>
-
- <code>
- public static void main(String[] args) throws Exception {
- </code>
- </td>
- </tr>
- <tr>
- <td>119.</td>
- <td>
-
- <code>
- int res = ToolRunner.run(new Configuration(), new WordCount(),
- args);
- </code>
- </td>
- </tr>
- <tr>
- <td>120.</td>
- <td>
-
- <code>System.exit(res);</code>
- </td>
- </tr>
- <tr>
- <td>121.</td>
- <td>
-
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>122.</td>
- <td>
- <code>}</code>
- </td>
- </tr>
- <tr>
- <td>123.</td>
- <td></td>
- </tr>
- </table>
- </section>
-
- <section>
- <title>Sample Runs</title>
-
- <p>Sample text-files as input:</p>
- <p>
- <code>$ bin/hadoop dfs -ls /usr/joe/wordcount/input/</code><br/>
- <code>/usr/joe/wordcount/input/file01</code><br/>
- <code>/usr/joe/wordcount/input/file02</code><br/>
- <br/>
- <code>$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file01</code><br/>
- <code>Hello World, Bye World!</code><br/>
- <br/>
- <code>$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file02</code><br/>
- <code>Hello Hadoop, Goodbye to hadoop.</code>
- </p>
-
- <p>Run the application:</p>
- <p>
- <code>
- $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount
- /usr/joe/wordcount/input /usr/joe/wordcount/output
- </code>
- </p>
- <p>Output:</p>
- <p>
- <code>
- $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
- </code>
- <br/>
- <code>Bye 1</code><br/>
- <code>Goodbye 1</code><br/>
- <code>Hadoop, 1</code><br/>
- <code>Hello 2</code><br/>
- <code>World! 1</code><br/>
- <code>World, 1</code><br/>
- <code>hadoop. 1</code><br/>
- <code>to 1</code><br/>
- </p>
-
- <p>Notice that the inputs differ from the first version we looked at,
- and how they affect the outputs.</p>
- <p>Now, let's plug in a pattern-file which lists the word-patterns to be
- ignored, via the <code>DistributedCache</code>.</p>
-
- <p>
- <code>$ hadoop dfs -cat /user/joe/wordcount/patterns.txt</code><br/>
- <code>\.</code><br/>
- <code>\,</code><br/>
- <code>\!</code><br/>
- <code>to</code><br/>
- </p>
-
- <p>Run it again, this time with more options:</p>
- <p>
- <code>
- $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount
- -Dwordcount.case.sensitive=true /usr/joe/wordcount/input
- /usr/joe/wordcount/output -skip /user/joe/wordcount/patterns.txt
- </code>
- </p>
-
- <p>As expected, the output:</p>
- <p>
- <code>
- $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
- </code>
- <br/>
- <code>Bye 1</code><br/>
- <code>Goodbye 1</code><br/>
- <code>Hadoop 1</code><br/>
- <code>Hello 2</code><br/>
- <code>World 2</code><br/>
- <code>hadoop 1</code><br/>
- </p>
-
- <p>Run it once more, this time switching off case-sensitivity:</p>
- <p>
- <code>
- $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount
- -Dwordcount.case.sensitive=false /usr/joe/wordcount/input
- /usr/joe/wordcount/output -skip /user/joe/wordcount/patterns.txt
- </code>
- </p>
-
- <p>Sure enough, the output:</p>
- <p>
- <code>
- $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
- </code>
- <br/>
- <code>bye 1</code><br/>
- <code>goodbye 1</code><br/>
- <code>hadoop 2</code><br/>
- <code>hello 2</code><br/>
- <code>world 2</code><br/>
- </p>
- </section>
-
- <section>
- <title>Highlights</title>
-
- <p>The second version of <code>WordCount</code> improves upon the
- previous one by using some features offered by the Map-Reduce framework:
- </p>
- <ul>
- <li>
- Demonstrates how applications can access configuration parameters
- in the <code>configure</code> method of the <code>Mapper</code> (and
- <code>Reducer</code>) implementations (lines 28-43).
- </li>
- <li>
- Demonstrates how the <code>DistributedCache</code> can be used to
- distribute read-only data needed by the jobs. Here it allows the user
- to specify word-patterns to skip while counting (line 104).
- </li>
- <li>
- Demonstrates the utility of the <code>Tool</code> interface and the
- <code>GenericOptionsParser</code> to handle generic Hadoop
- command-line options (lines 87-116, 119).
- </li>
- <li>
- Demonstrates how applications can use <code>Counters</code> (line 68)
- and how they can set application-specific status information via
- the <code>Reporter</code> instance passed to the <code>map</code> (and
- <code>reduce</code>) method (line 72).
- </li>
- </ul>
-
- </section>
- </section>
- <p>
- <em>Java and JNI are trademarks or registered trademarks of
- Sun Microsystems, Inc. in the United States and other countries.</em>
- </p>
-
- </body>
-
- </document>