- <h1>Hadoop Map/Reduce Tutorial</h1>
- <div id="minitoc-area">
- <ul class="minitoc">
- <li>
- <a href="#Purpose">Purpose</a>
- </li>
- <li>
- <a href="#Pre-requisites">Pre-requisites</a>
- </li>
- <li>
- <a href="#Overview">Overview</a>
- </li>
- <li>
- <a href="#Inputs+and+Outputs">Inputs and Outputs</a>
- </li>
- <li>
- <a href="#Example%3A+WordCount+v1.0">Example: WordCount v1.0</a>
- <ul class="minitoc">
- <li>
- <a href="#Source+Code">Source Code</a>
- </li>
- <li>
- <a href="#Usage">Usage</a>
- </li>
- <li>
- <a href="#Walk-through">Walk-through</a>
- </li>
- </ul>
- </li>
- <li>
- <a href="#Map%2FReduce+-+User+Interfaces">Map/Reduce - User Interfaces</a>
- <ul class="minitoc">
- <li>
- <a href="#Payload">Payload</a>
- <ul class="minitoc">
- <li>
- <a href="#Mapper">Mapper</a>
- </li>
- <li>
- <a href="#Reducer">Reducer</a>
- </li>
- <li>
- <a href="#Partitioner">Partitioner</a>
- </li>
- <li>
- <a href="#Reporter">Reporter</a>
- </li>
- <li>
- <a href="#OutputCollector">OutputCollector</a>
- </li>
- </ul>
- </li>
- <li>
- <a href="#Job+Configuration">Job Configuration</a>
- </li>
- <li>
- <a href="#Task+Execution+%26+Environment">Task Execution & Environment</a>
- <ul class="minitoc">
- <li>
- <a href="#Memory+management"> Memory management</a>
- </li>
- <li>
- <a href="#Map+Parameters">Map Parameters</a>
- </li>
- <li>
- <a href="#Shuffle%2FReduce+Parameters">Shuffle/Reduce Parameters</a>
- </li>
- <li>
- <a href="#Directory+Structure"> Directory Structure </a>
- </li>
- <li>
- <a href="#Task+JVM+Reuse">Task JVM Reuse</a>
- </li>
- </ul>
- </li>
- <li>
- <a href="#Job+Submission+and+Monitoring">Job Submission and Monitoring</a>
- <ul class="minitoc">
- <li>
- <a href="#Job+Control">Job Control</a>
- </li>
- </ul>
- </li>
- <li>
- <a href="#Job+Input">Job Input</a>
- <ul class="minitoc">
- <li>
- <a href="#InputSplit">InputSplit</a>
- </li>
- <li>
- <a href="#RecordReader">RecordReader</a>
- </li>
- </ul>
- </li>
- <li>
- <a href="#Job+Output">Job Output</a>
- <ul class="minitoc">
- <li>
- <a href="#OutputCommitter">OutputCommitter</a>
- </li>
- <li>
- <a href="#Task+Side-Effect+Files">Task Side-Effect Files</a>
- </li>
- <li>
- <a href="#RecordWriter">RecordWriter</a>
- </li>
- </ul>
- </li>
- <li>
- <a href="#Other+Useful+Features">Other Useful Features</a>
- <ul class="minitoc">
- <li>
- <a href="#Submitting+Jobs+to+Queues">Submitting Jobs to Queues</a>
- </li>
- <li>
- <a href="#Counters">Counters</a>
- </li>
- <li>
- <a href="#DistributedCache">DistributedCache</a>
- </li>
- <li>
- <a href="#Tool">Tool</a>
- </li>
- <li>
- <a href="#IsolationRunner">IsolationRunner</a>
- </li>
- <li>
- <a href="#Profiling">Profiling</a>
- </li>
- <li>
- <a href="#Debugging">Debugging</a>
- </li>
- <li>
- <a href="#JobControl">JobControl</a>
- </li>
- <li>
- <a href="#Data+Compression">Data Compression</a>
- </li>
- <li>
- <a href="#Skipping+Bad+Records">Skipping Bad Records</a>
- </li>
- </ul>
- </li>
- </ul>
- </li>
- <li>
- <a href="#Example%3A+WordCount+v2.0">Example: WordCount v2.0</a>
- <ul class="minitoc">
- <li>
- <a href="#Source+Code-N10FA4">Source Code</a>
- </li>
- <li>
- <a href="#Sample+Runs">Sample Runs</a>
- </li>
- <li>
- <a href="#Highlights">Highlights</a>
- </li>
- </ul>
- </li>
- </ul>
- </div>
-
-
- <a name="N1000D"></a><a name="Purpose"></a>
- <h2 class="h3">Purpose</h2>
- <div class="section">
- <p>This document comprehensively describes all user-facing facets of the
- Hadoop Map/Reduce framework and serves as a tutorial.
- </p>
- </div>
-
-
- <a name="N10017"></a><a name="Pre-requisites"></a>
- <h2 class="h3">Pre-requisites</h2>
- <div class="section">
- <p>Ensure that Hadoop is installed, configured, and running. More
- details:</p>
- <ul>
-
- <li>
-
- <a href="quickstart.html">Hadoop Quick Start</a> for first-time users.
- </li>
-
- <li>
-
- <a href="cluster_setup.html">Hadoop Cluster Setup</a> for large,
- distributed clusters.
- </li>
-
- </ul>
- </div>
-
-
- <a name="N10032"></a><a name="Overview"></a>
- <h2 class="h3">Overview</h2>
- <div class="section">
- <p>Hadoop Map/Reduce is a software framework for easily writing
- applications which process vast amounts of data (multi-terabyte data-sets)
- in-parallel on large clusters (thousands of nodes) of commodity
- hardware in a reliable, fault-tolerant manner.</p>
- <p>A Map/Reduce <em>job</em> usually splits the input data-set into
- independent chunks which are processed by the <em>map tasks</em> in a
- completely parallel manner. The framework sorts the outputs of the maps,
- which are then input to the <em>reduce tasks</em>. Typically both the
- input and the output of the job are stored in a file-system. The framework
- takes care of scheduling tasks, monitoring them, and re-executing failed
- tasks.</p>
- <p>Typically the compute nodes and the storage nodes are the same, that is,
- the Map/Reduce framework and the Hadoop Distributed File System (see <a href="hdfs_design.html">HDFS Architecture </a>)
- are running on the same set of nodes. This configuration
- allows the framework to effectively schedule tasks on the nodes where data
- is already present, resulting in very high aggregate bandwidth across the
- cluster.</p>
- <p>The Map/Reduce framework consists of a single master
- <span class="codefrag">JobTracker</span> and one slave <span class="codefrag">TaskTracker</span> per
- cluster-node. The master is responsible for scheduling the jobs' component
- tasks on the slaves, monitoring them and re-executing the failed tasks. The
- slaves execute the tasks as directed by the master.</p>
- <p>Minimally, applications specify the input/output locations and supply
- <em>map</em> and <em>reduce</em> functions via implementations of
- appropriate interfaces and/or abstract-classes. These, and other job
- parameters, comprise the <em>job configuration</em>. The Hadoop
- <em>job client</em> then submits the job (jar/executable etc.) and
- configuration to the <span class="codefrag">JobTracker</span> which then assumes the
- responsibility of distributing the software/configuration to the slaves,
- scheduling tasks and monitoring them, providing status and diagnostic
- information to the job-client.</p>
- <p>Although the Hadoop framework is implemented in Java<sup>TM</sup>,
- Map/Reduce applications need not be written in Java.</p>
- <ul>
-
- <li>
-
- <a href="api/org/apache/hadoop/streaming/package-summary.html">
- Hadoop Streaming</a> is a utility which allows users to create and run
- jobs with any executables (e.g. shell utilities) as the mapper and/or
- the reducer.
- </li>
-
- <li>
-
- <a href="api/org/apache/hadoop/mapred/pipes/package-summary.html">
- Hadoop Pipes</a> is a <a href="http://www.swig.org/">SWIG</a>-
- compatible <em>C++ API</em> to implement Map/Reduce applications (non
- JNI<sup>TM</sup> based).
- </li>
-
- </ul>
- </div>
-
-
- <a name="N1008B"></a><a name="Inputs+and+Outputs"></a>
- <h2 class="h3">Inputs and Outputs</h2>
- <div class="section">
- <p>The Map/Reduce framework operates exclusively on
- <span class="codefrag"><key, value></span> pairs, that is, the framework views the
- input to the job as a set of <span class="codefrag"><key, value></span> pairs and
- produces a set of <span class="codefrag"><key, value></span> pairs as the output of
- the job, conceivably of different types.</p>
- <p>The <span class="codefrag">key</span> and <span class="codefrag">value</span> classes have to be
- serializable by the framework and hence need to implement the
- <a href="api/org/apache/hadoop/io/Writable.html">Writable</a>
- interface. Additionally, the <span class="codefrag">key</span> classes have to implement the
- <a href="api/org/apache/hadoop/io/WritableComparable.html">
- WritableComparable</a> interface to facilitate sorting by the framework.
- </p>
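- <p>As an illustration, here is a minimal sketch of a custom key type
- (the class <span class="codefrag">WordLength</span> and its fields are
- illustrative, not part of the framework):</p>
- <pre class="code">
- import java.io.DataInput;
- import java.io.DataOutput;
- import java.io.IOException;
- 
- import org.apache.hadoop.io.WritableComparable;
- 
- // Illustrative custom key: a (word, length) pair.
- public class WordLength implements WritableComparable<WordLength> {
-   private String word = "";
-   private int length;
- 
-   public void write(DataOutput out) throws IOException {
-     out.writeUTF(word);   // serialize the fields in a fixed order
-     out.writeInt(length);
-   }
- 
-   public void readFields(DataInput in) throws IOException {
-     word = in.readUTF();  // deserialize in exactly the same order
-     length = in.readInt();
-   }
- 
-   public int compareTo(WordLength other) {  // defines the sort order
-     int c = word.compareTo(other.word);
-     return (c != 0) ? c : (length - other.length);
-   }
- 
-   // used by the default HashPartitioner; keep consistent with compareTo()
-   public int hashCode() {
-     return word.hashCode() * 31 + length;
-   }
- }
- </pre>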
- <p>Input and Output types of a Map/Reduce job:</p>
- <p>
- (input) <span class="codefrag"><k1, v1></span>
- ->
- <strong>map</strong>
- ->
- <span class="codefrag"><k2, v2></span>
- ->
- <strong>combine</strong>
- ->
- <span class="codefrag"><k2, v2></span>
- ->
- <strong>reduce</strong>
- ->
- <span class="codefrag"><k3, v3></span> (output)
- </p>
- </div>
-
- <a name="N100CD"></a><a name="Example%3A+WordCount+v1.0"></a>
- <h2 class="h3">Example: WordCount v1.0</h2>
- <div class="section">
- <p>Before we jump into the details, let's walk through an example Map/Reduce
- application to get a flavour for how such applications work.</p>
- <p>
- <span class="codefrag">WordCount</span> is a simple application that counts the number of
- occurrences of each word in a given input set.</p>
- <p>This works with a local-standalone, pseudo-distributed or fully-distributed
- Hadoop installation (see <a href="quickstart.html">Hadoop Quick Start</a>).</p>
- <a name="N100E2"></a><a name="Source+Code"></a>
- <h3 class="h4">Source Code</h3>
- <table class="ForrestTable" cellspacing="1" cellpadding="4">
-
- <tr>
-
- <th colspan="1" rowspan="1"></th>
- <th colspan="1" rowspan="1">WordCount.java</th>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">1.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">package org.myorg;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">2.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">3.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">import java.io.IOException;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">4.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">import java.util.*;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">5.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">6.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">import org.apache.hadoop.fs.Path;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">7.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">import org.apache.hadoop.conf.*;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">8.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">import org.apache.hadoop.io.*;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">9.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">import org.apache.hadoop.mapred.*;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">10.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">import org.apache.hadoop.util.*;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">11.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">12.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">public class WordCount {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">13.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">14.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- public static class Map extends MapReduceBase
- implements Mapper<LongWritable, Text, Text, IntWritable> {
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">15.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- private final static IntWritable one = new IntWritable(1);
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">16.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">private Text word = new Text();</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">17.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">18.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- public void map(LongWritable key, Text value,
- OutputCollector<Text, IntWritable> output,
- Reporter reporter) throws IOException {
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">19.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">String line = value.toString();</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">20.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">StringTokenizer tokenizer = new StringTokenizer(line);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">21.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">while (tokenizer.hasMoreTokens()) {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">22.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">word.set(tokenizer.nextToken());</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">23.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">output.collect(word, one);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">24.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">25.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">26.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">27.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">28.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- public static class Reduce extends MapReduceBase implements
- Reducer<Text, IntWritable, Text, IntWritable> {
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">29.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- public void reduce(Text key, Iterator<IntWritable> values,
- OutputCollector<Text, IntWritable> output,
- Reporter reporter) throws IOException {
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">30.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">int sum = 0;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">31.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">while (values.hasNext()) {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">32.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">sum += values.next().get();</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">33.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">34.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">output.collect(key, new IntWritable(sum));</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">35.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">36.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">37.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">38.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- public static void main(String[] args) throws Exception {
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">39.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- JobConf conf = new JobConf(WordCount.class);
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">40.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">conf.setJobName("wordcount");</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">41.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">42.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">conf.setOutputKeyClass(Text.class);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">43.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">conf.setOutputValueClass(IntWritable.class);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">44.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">45.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">conf.setMapperClass(Map.class);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">46.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">conf.setCombinerClass(Reduce.class);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">47.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">conf.setReducerClass(Reduce.class);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">48.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">49.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">conf.setInputFormat(TextInputFormat.class);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">50.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">conf.setOutputFormat(TextOutputFormat.class);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">51.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">52.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">FileInputFormat.setInputPaths(conf, new Path(args[0]));</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">53.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">FileOutputFormat.setOutputPath(conf, new Path(args[1]));</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">54.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">55.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">JobClient.runJob(conf);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">57.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">58.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">59.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- </table>
- <a name="N10464"></a><a name="Usage"></a>
- <h3 class="h4">Usage</h3>
- <p>Assuming <span class="codefrag">HADOOP_HOME</span> is the root of the installation and
- <span class="codefrag">HADOOP_VERSION</span> is the Hadoop version installed, compile
- <span class="codefrag">WordCount.java</span> and create a jar:</p>
- <p>
-
- <span class="codefrag">$ mkdir wordcount_classes</span>
- <br>
-
- <span class="codefrag">
- $ javac -classpath ${HADOOP_HOME}/hadoop-${HADOOP_VERSION}-core.jar
- -d wordcount_classes WordCount.java
- </span>
- <br>
-
- <span class="codefrag">$ jar -cvf /usr/joe/wordcount.jar -C wordcount_classes/ .</span>
-
- </p>
- <p>Assuming that:</p>
- <ul>
-
- <li>
-
- <span class="codefrag">/usr/joe/wordcount/input</span> - input directory in HDFS
- </li>
-
- <li>
-
- <span class="codefrag">/usr/joe/wordcount/output</span> - output directory in HDFS
- </li>
-
- </ul>
- <p>Sample text-files as input:</p>
- <p>
-
- <span class="codefrag">$ bin/hadoop dfs -ls /usr/joe/wordcount/input/</span>
- <br>
-
- <span class="codefrag">/usr/joe/wordcount/input/file01</span>
- <br>
-
- <span class="codefrag">/usr/joe/wordcount/input/file02</span>
- <br>
-
- <br>
-
- <span class="codefrag">$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file01</span>
- <br>
-
- <span class="codefrag">Hello World Bye World</span>
- <br>
-
- <br>
-
- <span class="codefrag">$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file02</span>
- <br>
-
- <span class="codefrag">Hello Hadoop Goodbye Hadoop</span>
-
- </p>
- <p>Run the application:</p>
- <p>
-
- <span class="codefrag">
- $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount
- /usr/joe/wordcount/input /usr/joe/wordcount/output
- </span>
-
- </p>
- <p>Output:</p>
- <p>
-
- <span class="codefrag">
- $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
- </span>
-
- <br>
-
- <span class="codefrag">Bye 1</span>
- <br>
-
- <span class="codefrag">Goodbye 1</span>
- <br>
-
- <span class="codefrag">Hadoop 2</span>
- <br>
-
- <span class="codefrag">Hello 2</span>
- <br>
-
- <span class="codefrag">World 2</span>
- <br>
-
- </p>
- <p> Applications can specify a comma-separated list of paths which
- would be present in the current working directory of the task
- using the option <span class="codefrag">-files</span>. The <span class="codefrag">-libjars</span>
- option allows applications to add jars to the classpaths of the maps
- and reduces. The <span class="codefrag">-archives</span> option allows them to pass archives
- as arguments; these are unzipped/unjarred, and a link with the name of the
- jar/zip is created in the current working directory of tasks. More
- details about the command line options are available in the
- <a href="commands_manual.html">Hadoop Command Guide</a>.
- </p>
- <p>Running <span class="codefrag">wordcount</span> example with
- <span class="codefrag">-libjars</span> and <span class="codefrag">-files</span>:<br>
-
- <span class="codefrag"> hadoop jar hadoop-examples.jar wordcount -files cachefile.txt
- -libjars mylib.jar input output </span>
-
- </p>
- <a name="N10504"></a><a name="Walk-through"></a>
- <h3 class="h4">Walk-through</h3>
- <p>The <span class="codefrag">WordCount</span> application is quite straight-forward.</p>
- <p>The <span class="codefrag">Mapper</span> implementation (lines 14-26), via the
- <span class="codefrag">map</span> method (lines 18-25), processes one line at a time,
- as provided by the specified <span class="codefrag">TextInputFormat</span> (line 49).
- It then splits the line into tokens separated by whitespace, via the
- <span class="codefrag">StringTokenizer</span>, and emits a key-value pair of
- <span class="codefrag">< <word>, 1></span>.</p>
- <p>
- For the given sample input the first map emits:<br>
-
- <span class="codefrag">< Hello, 1></span>
- <br>
-
- <span class="codefrag">< World, 1></span>
- <br>
-
- <span class="codefrag">< Bye, 1></span>
- <br>
-
- <span class="codefrag">< World, 1></span>
- <br>
-
- </p>
- <p>
- The second map emits:<br>
-
- <span class="codefrag">< Hello, 1></span>
- <br>
-
- <span class="codefrag">< Hadoop, 1></span>
- <br>
-
- <span class="codefrag">< Goodbye, 1></span>
- <br>
-
- <span class="codefrag">< Hadoop, 1></span>
- <br>
-
- </p>
- <p>We'll learn more about the number of maps spawned for a given job, and
- how to control them in a fine-grained manner, a bit later in the
- tutorial.</p>
- <p>
- <span class="codefrag">WordCount</span> also specifies a <span class="codefrag">combiner</span> (line
- 46). Hence, the output of each map is passed through the local combiner
- (which, as per the job configuration, is the same as the
- <span class="codefrag">Reducer</span>) for local aggregation, after being sorted on the
- <em>key</em>s.</p>
- <p>
- The output of the first map:<br>
-
- <span class="codefrag">< Bye, 1></span>
- <br>
-
- <span class="codefrag">< Hello, 1></span>
- <br>
-
- <span class="codefrag">< World, 2></span>
- <br>
-
- </p>
- <p>
- The output of the second map:<br>
-
- <span class="codefrag">< Goodbye, 1></span>
- <br>
-
- <span class="codefrag">< Hadoop, 2></span>
- <br>
-
- <span class="codefrag">< Hello, 1></span>
- <br>
-
- </p>
- <p>The <span class="codefrag">Reducer</span> implementation (lines 28-36), via the
- <span class="codefrag">reduce</span> method (lines 29-35) just sums up the values,
- which are the occurrence counts for each key (i.e. words in this example).
- </p>
- <p>
- Thus the output of the job is:<br>
-
- <span class="codefrag">< Bye, 1></span>
- <br>
-
- <span class="codefrag">< Goodbye, 1></span>
- <br>
-
- <span class="codefrag">< Hadoop, 2></span>
- <br>
-
- <span class="codefrag">< Hello, 2></span>
- <br>
-
- <span class="codefrag">< World, 2></span>
- <br>
-
- </p>
- <p>The <span class="codefrag">run</span> method specifies various facets of the job, such
- as the input/output paths (passed via the command line), key/value
- types, input/output formats etc., in the <span class="codefrag">JobConf</span>.
- It then calls the <span class="codefrag">JobClient.runJob</span> (line 55) to submit the
- and monitor its progress.</p>
- <p>We'll learn more about <span class="codefrag">JobConf</span>, <span class="codefrag">JobClient</span>,
- <span class="codefrag">Tool</span> and other interfaces and classes a bit later in the
- tutorial.</p>
- </div>
-
-
- <a name="N105BB"></a><a name="Map%2FReduce+-+User+Interfaces"></a>
- <h2 class="h3">Map/Reduce - User Interfaces</h2>
- <div class="section">
- <p>This section provides a reasonable amount of detail on every user-facing
- aspect of the Map/Reduce framework. This should help users implement,
- configure and tune their jobs in a fine-grained manner. However, please
- note that the javadoc for each class/interface remains the most
- comprehensive documentation available; this is only meant to be a tutorial.
- </p>
- <p>Let us first take the <span class="codefrag">Mapper</span> and <span class="codefrag">Reducer</span>
- interfaces. Applications typically implement them to provide the
- <span class="codefrag">map</span> and <span class="codefrag">reduce</span> methods.</p>
- <p>We will then discuss other core interfaces including
- <span class="codefrag">JobConf</span>, <span class="codefrag">JobClient</span>, <span class="codefrag">Partitioner</span>,
- <span class="codefrag">OutputCollector</span>, <span class="codefrag">Reporter</span>,
- <span class="codefrag">InputFormat</span>, <span class="codefrag">OutputFormat</span>,
- <span class="codefrag">OutputCommitter</span> and others.</p>
- <p>Finally, we will wrap up by discussing some useful features of the
- framework such as the <span class="codefrag">DistributedCache</span>,
- <span class="codefrag">IsolationRunner</span> etc.</p>
- <a name="N105F7"></a><a name="Payload"></a>
- <h3 class="h4">Payload</h3>
- <p>Applications typically implement the <span class="codefrag">Mapper</span> and
- <span class="codefrag">Reducer</span> interfaces to provide the <span class="codefrag">map</span> and
- <span class="codefrag">reduce</span> methods. These form the core of the job.</p>
- <a name="N1060C"></a><a name="Mapper"></a>
- <h4>Mapper</h4>
- <p>
- <a href="api/org/apache/hadoop/mapred/Mapper.html">
- Mapper</a> maps input key/value pairs to a set of intermediate
- key/value pairs.</p>
- <p>Maps are the individual tasks that transform input records into
- intermediate records. The transformed intermediate records do not need
- to be of the same type as the input records. A given input pair may
- map to zero or many output pairs.</p>
- <p>The Hadoop Map/Reduce framework spawns one map task for each
- <span class="codefrag">InputSplit</span> generated by the <span class="codefrag">InputFormat</span> for
- the job.</p>
- <p>Overall, <span class="codefrag">Mapper</span> implementations are passed the
- <span class="codefrag">JobConf</span> for the job via the
- <a href="api/org/apache/hadoop/mapred/JobConfigurable.html#configure(org.apache.hadoop.mapred.JobConf)">
- JobConfigurable.configure(JobConf)</a> method and can override it to
- initialize themselves. The framework then calls
- <a href="api/org/apache/hadoop/mapred/Mapper.html#map(K1, V1, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)">
- map(WritableComparable, Writable, OutputCollector, Reporter)</a> for
- each key/value pair in the <span class="codefrag">InputSplit</span> for that task.
- Applications can then override the
- <a href="api/org/apache/hadoop/io/Closeable.html#close()">
- Closeable.close()</a> method to perform any required cleanup.</p>
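- <p>For example, a mapper might read a job parameter in
- <span class="codefrag">configure</span> and release resources in
- <span class="codefrag">close</span>. A minimal sketch (the class name and the
- <span class="codefrag">grep.pattern</span> key are illustrative):</p>
- <pre class="code">
- public static class GrepMapper extends MapReduceBase
-     implements Mapper<LongWritable, Text, Text, IntWritable> {
-   private String pattern;
- 
-   public void configure(JobConf job) {
-     // read a job-specific parameter set at submission time
-     pattern = job.get("grep.pattern", "");
-   }
- 
-   public void map(LongWritable key, Text value,
-                   OutputCollector<Text, IntWritable> output,
-                   Reporter reporter) throws IOException {
-     // emit the pattern once for every line that contains it
-     if (value.toString().contains(pattern)) {
-       output.collect(new Text(pattern), new IntWritable(1));
-     }
-   }
- 
-   public void close() throws IOException {
-     // release any per-task resources acquired in configure()
-   }
- }
- </pre>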
- <p>Output pairs are collected with calls to
- <a href="api/org/apache/hadoop/mapred/OutputCollector.html#collect(K, V)">
- OutputCollector.collect(WritableComparable,Writable)</a>.</p>
- <p>Applications can use the <span class="codefrag">Reporter</span> to report
- progress, set application-level status messages and update
- <span class="codefrag">Counters</span>, or just indicate that they are alive.</p>
- <p>All intermediate values associated with a given output key are
- subsequently grouped by the framework, and passed to the
- <span class="codefrag">Reducer</span>(s) to determine the final output. Users can
- control the grouping by specifying a <span class="codefrag">Comparator</span> via
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setOutputKeyComparatorClass(java.lang.Class)">
- JobConf.setOutputKeyComparatorClass(Class)</a>.</p>
- <p>The <span class="codefrag">Mapper</span> outputs are sorted and then
- partitioned per <span class="codefrag">Reducer</span>. The total number of partitions is
- the same as the number of reduce tasks for the job. Users can control
- which keys (and hence records) go to which <span class="codefrag">Reducer</span> by
- implementing a custom <span class="codefrag">Partitioner</span>.</p>
- <p>Users can optionally specify a <span class="codefrag">combiner</span>, via
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setCombinerClass(java.lang.Class)">
- JobConf.setCombinerClass(Class)</a>, to perform local aggregation of
- the intermediate outputs, which helps to cut down the amount of data
- transferred from the <span class="codefrag">Mapper</span> to the <span class="codefrag">Reducer</span>.
- </p>
- <p>The intermediate, sorted outputs are always stored in a simple
- (key-len, key, value-len, value) format.
- Applications can control if, and how, the
- intermediate outputs are to be compressed and the
- <a href="api/org/apache/hadoop/io/compress/CompressionCodec.html">
- CompressionCodec</a> to be used via the <span class="codefrag">JobConf</span>.
- </p>
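- <p>For instance, a sketch of enabling map-output compression with the
- bundled gzip codec:</p>
- <pre class="code">
- // compress the intermediate map outputs before they are shuffled
- conf.setCompressMapOutput(true);
- conf.setMapOutputCompressorClass(
-     org.apache.hadoop.io.compress.GzipCodec.class);
- </pre>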
- <a name="N10682"></a><a name="How+Many+Maps%3F"></a>
- <h5>How Many Maps?</h5>
- <p>The number of maps is usually driven by the total size of the
- inputs, that is, the total number of blocks of the input files.</p>
- <p>The right level of parallelism for maps seems to be around 10-100
- maps per-node, although it has been set as high as 300 maps for very
- cpu-light map tasks. Task setup takes a while, so it is best if the
- maps take at least a minute to execute.</p>
- <p>Thus, if you expect 10TB of input data and have a blocksize of
- <span class="codefrag">128MB</span>, you'll end up with 82,000 maps, unless
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setNumMapTasks(int)">
- setNumMapTasks(int)</a> (which only provides a hint to the framework)
- is used to set it even higher.</p>
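- <p>A sketch of raising the hint:</p>
- <pre class="code">
- // a hint only; the InputFormat ultimately decides the number of splits
- conf.setNumMapTasks(100000);
- </pre>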
- <a name="N1069A"></a><a name="Reducer"></a>
- <h4>Reducer</h4>
- <p>
- <a href="api/org/apache/hadoop/mapred/Reducer.html">
- Reducer</a> reduces a set of intermediate values which share a key to
- a smaller set of values.</p>
- <p>The number of reduces for the job is set by the user
- via <a href="api/org/apache/hadoop/mapred/JobConf.html#setNumReduceTasks(int)">
- JobConf.setNumReduceTasks(int)</a>.</p>
- <p>Overall, <span class="codefrag">Reducer</span> implementations are passed the
- <span class="codefrag">JobConf</span> for the job via the
- <a href="api/org/apache/hadoop/mapred/JobConfigurable.html#configure(org.apache.hadoop.mapred.JobConf)">
- JobConfigurable.configure(JobConf)</a> method and can override it to
- initialize themselves. The framework then calls
- <a href="api/org/apache/hadoop/mapred/Reducer.html#reduce(K2, java.util.Iterator, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)">
- reduce(WritableComparable, Iterator, OutputCollector, Reporter)</a>
- method for each <span class="codefrag"><key, (list of values)></span>
- pair in the grouped inputs. Applications can then override the
- <a href="api/org/apache/hadoop/io/Closeable.html#close()">
- Closeable.close()</a> method to perform any required cleanup.</p>
- <p>
- <span class="codefrag">Reducer</span> has 3 primary phases: shuffle, sort and reduce.
- </p>
- <a name="N106CA"></a><a name="Shuffle"></a>
- <h5>Shuffle</h5>
- <p>Input to the <span class="codefrag">Reducer</span> is the sorted output of the
- mappers. In this phase the framework fetches the relevant partition
- of the output of all the mappers, via HTTP.</p>
- <a name="N106D7"></a><a name="Sort"></a>
- <h5>Sort</h5>
- <p>The framework groups <span class="codefrag">Reducer</span> inputs by keys (since
- different mappers may have output the same key) in this stage.</p>
- <p>The shuffle and sort phases occur simultaneously; while
- map-outputs are being fetched they are merged.</p>
- <a name="N106E6"></a><a name="Secondary+Sort"></a>
- <h5>Secondary Sort</h5>
- <p>If equivalence rules for grouping the intermediate keys are
- required to be different from those for grouping keys before
- reduction, then one may specify a <span class="codefrag">Comparator</span> via
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setOutputValueGroupingComparator(java.lang.Class)">
- JobConf.setOutputValueGroupingComparator(Class)</a>. Since
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setOutputKeyComparatorClass(java.lang.Class)">
- JobConf.setOutputKeyComparatorClass(Class)</a> can be used to
- control how intermediate keys are sorted, these can be used in
- conjunction to simulate <em>secondary sort on values</em>.</p>
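- <p>A sketch of the two calls (the comparator class names are
- illustrative, user-supplied <span class="codefrag">RawComparator</span>
- implementations):</p>
- <pre class="code">
- // sort on the full composite key...
- conf.setOutputKeyComparatorClass(CompositeKeyComparator.class);
- // ...but group values at the reducer by the natural (first) part of the key
- conf.setOutputValueGroupingComparator(FirstPartGroupingComparator.class);
- </pre>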
- <a name="N106FF"></a><a name="Reduce"></a>
- <h5>Reduce</h5>
- <p>In this phase the
- <a href="api/org/apache/hadoop/mapred/Reducer.html#reduce(K2, java.util.Iterator, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)">
- reduce(WritableComparable, Iterator, OutputCollector, Reporter)</a>
- method is called for each <span class="codefrag"><key, (list of values)></span>
- pair in the grouped inputs.</p>
- <p>The output of the reduce task is typically written to the
- <a href="api/org/apache/hadoop/fs/FileSystem.html">
- FileSystem</a> via
- <a href="api/org/apache/hadoop/mapred/OutputCollector.html#collect(K, V)">
- OutputCollector.collect(WritableComparable, Writable)</a>.</p>
- <p>Applications can use the <span class="codefrag">Reporter</span> to report
- progress, set application-level status messages and update
- <span class="codefrag">Counters</span>, or just indicate that they are alive.</p>
- <p>The output of the <span class="codefrag">Reducer</span> is <em>not sorted</em>.</p>
- <a name="N1072D"></a><a name="How+Many+Reduces%3F"></a>
- <h5>How Many Reduces?</h5>
- <p>The right number of reduces seems to be <span class="codefrag">0.95</span> or
- <span class="codefrag">1.75</span> multiplied by (<<em>no. of nodes</em>> *
- <span class="codefrag">mapred.tasktracker.reduce.tasks.maximum</span>).</p>
- <p>With <span class="codefrag">0.95</span> all of the reduces can launch immediately
- and start transferring map outputs as the maps finish. With
- <span class="codefrag">1.75</span> the faster nodes will finish their first round of
- reduces and launch a second wave of reduces, doing a much better job
- of load balancing.</p>
- <p>Increasing the number of reduces increases the framework overhead,
- but improves load balancing and lowers the cost of failures.</p>
- <p>The scaling factors above are slightly less than whole numbers to
- reserve a few reduce slots in the framework for speculative-tasks and
- failed tasks.</p>
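- <p>For example, on a 10-node cluster with
- <span class="codefrag">mapred.tasktracker.reduce.tasks.maximum</span> set to 2
- (20 reduce slots in all), a factor of <span class="codefrag">0.95</span> suggests
- 19 reduces, while <span class="codefrag">1.75</span> suggests 35.</p>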
- <a name="N10752"></a><a name="Reducer+NONE"></a>
- <h5>Reducer NONE</h5>
- <p>It is legal to set the number of reduce-tasks to <em>zero</em> if
- no reduction is desired.</p>
- <p>In this case the outputs of the map-tasks go directly to the
- <span class="codefrag">FileSystem</span>, into the output path set by
- <a href="api/org/apache/hadoop/mapred/FileOutputFormat.html#setOutputPath(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path)">
- setOutputPath(Path)</a>. The framework does not sort the
- map-outputs before writing them out to the <span class="codefrag">FileSystem</span>.
- </p>
- <a name="N1076D"></a><a name="Partitioner"></a>
- <h4>Partitioner</h4>
- <p>
- <a href="api/org/apache/hadoop/mapred/Partitioner.html">
- Partitioner</a> partitions the key space.</p>
- <p>Partitioner controls the partitioning of the keys of the
- intermediate map-outputs. The key (or a subset of the key) is used to
- derive the partition, typically by a <em>hash function</em>. The total
- number of partitions is the same as the number of reduce tasks for the
- job. Hence this controls which of the <span class="codefrag">m</span> reduce tasks the
- intermediate key (and hence the record) is sent to for reduction.</p>
- <p>
- <a href="api/org/apache/hadoop/mapred/lib/HashPartitioner.html">
- HashPartitioner</a> is the default <span class="codefrag">Partitioner</span>.</p>
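- <p>For illustration, a minimal sketch of a custom
- <span class="codefrag">Partitioner</span> that partitions on only the first
- tab-separated field of a text key (the class name and key layout are
- assumptions for this example):</p>
- <pre class="code">
- import org.apache.hadoop.io.Text;
- import org.apache.hadoop.mapred.JobConf;
- import org.apache.hadoop.mapred.Partitioner;
-
- public class FirstFieldPartitioner implements Partitioner<Text, Text> {
-   public void configure(JobConf job) {}
-
-   public int getPartition(Text key, Text value, int numPartitions) {
-     // All keys sharing a first field go to the same reduce.
-     String first = key.toString().split("\t", 2)[0];
-     return (first.hashCode() & Integer.MAX_VALUE) % numPartitions;
-   }
- }
- </pre>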
- <a name="N1078C"></a><a name="Reporter"></a>
- <h4>Reporter</h4>
- <p>
- <a href="api/org/apache/hadoop/mapred/Reporter.html">
- Reporter</a> is a facility for Map/Reduce applications to report
- progress, set application-level status messages and update
- <span class="codefrag">Counters</span>.</p>
- <p>
- <span class="codefrag">Mapper</span> and <span class="codefrag">Reducer</span> implementations can use
- the <span class="codefrag">Reporter</span> to report progress or just indicate
- that they are alive. In scenarios where the application takes a
- significant amount of time to process individual key/value pairs,
- this is crucial, since otherwise the framework might assume that the
- task has timed out and kill it. Another way to avoid this is to
- set the configuration parameter <span class="codefrag">mapred.task.timeout</span> to a
- high-enough value (or even set it to <em>zero</em> for no time-outs).
- </p>
- <p>Applications can also update <span class="codefrag">Counters</span> using the
- <span class="codefrag">Reporter</span>.</p>
- <a name="N107B6"></a><a name="OutputCollector"></a>
- <h4>OutputCollector</h4>
- <p>
- <a href="api/org/apache/hadoop/mapred/OutputCollector.html">
- OutputCollector</a> is a generalization of the facility provided by
- the Map/Reduce framework to collect data output by the
- <span class="codefrag">Mapper</span> or the <span class="codefrag">Reducer</span> (either the
- intermediate outputs or the output of the job).</p>
- <p>Hadoop Map/Reduce comes bundled with a
- <a href="api/org/apache/hadoop/mapred/lib/package-summary.html">
- library</a> of generally useful mappers, reducers, and partitioners.</p>
- <a name="N107D1"></a><a name="Job+Configuration"></a>
- <h3 class="h4">Job Configuration</h3>
- <p>
- <a href="api/org/apache/hadoop/mapred/JobConf.html">
- JobConf</a> represents a Map/Reduce job configuration.</p>
- <p>
- <span class="codefrag">JobConf</span> is the primary interface for a user to describe
- a Map/Reduce job to the Hadoop framework for execution. The framework
- tries to faithfully execute the job as described by <span class="codefrag">JobConf</span>,
- however:</p>
- <ul>
-
- <li>
- Some configuration parameters may have been marked as
- <a href="api/org/apache/hadoop/conf/Configuration.html#FinalParams">
- final</a> by administrators and hence cannot be altered.
- </li>
-
- <li>
- While some job parameters are straightforward to set (e.g.
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setNumReduceTasks(int)">
- setNumReduceTasks(int)</a>), other parameters interact subtly with
- the rest of the framework and/or job configuration and are
- more complex to set (e.g.
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setNumMapTasks(int)">
- setNumMapTasks(int)</a>).
- </li>
-
- </ul>
- <p>
- <span class="codefrag">JobConf</span> is typically used to specify the
- <span class="codefrag">Mapper</span>, combiner (if any), <span class="codefrag">Partitioner</span>,
- <span class="codefrag">Reducer</span>, <span class="codefrag">InputFormat</span>,
- <span class="codefrag">OutputFormat</span> and <span class="codefrag">OutputCommitter</span>
- implementations. <span class="codefrag">JobConf</span> also
- indicates the set of input files
- (<a href="api/org/apache/hadoop/mapred/FileInputFormat.html#setInputPaths(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path[])">setInputPaths(JobConf, Path...)</a>
- /<a href="api/org/apache/hadoop/mapred/FileInputFormat.html#addInputPath(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path)">addInputPath(JobConf, Path)</a>)
- and (<a href="api/org/apache/hadoop/mapred/FileInputFormat.html#setInputPaths(org.apache.hadoop.mapred.JobConf,%20java.lang.String)">setInputPaths(JobConf, String)</a>
- /<a href="api/org/apache/hadoop/mapred/FileInputFormat.html#addInputPath(org.apache.hadoop.mapred.JobConf,%20java.lang.String)">addInputPaths(JobConf, String)</a>)
- and where the output files should be written
- (<a href="api/org/apache/hadoop/mapred/FileOutputFormat.html#setOutputPath(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path)">setOutputPath(Path)</a>).</p>
- <p>Optionally, <span class="codefrag">JobConf</span> is used to specify other advanced
- facets of the job such as the <span class="codefrag">Comparator</span> to be used, files
- to be put in the <span class="codefrag">DistributedCache</span>, whether intermediate
- and/or job outputs are to be compressed (and how), debugging via
- user-provided scripts
- (<a href="api/org/apache/hadoop/mapred/JobConf.html#setMapDebugScript(java.lang.String)">setMapDebugScript(String)</a>/<a href="api/org/apache/hadoop/mapred/JobConf.html#setReduceDebugScript(java.lang.String)">setReduceDebugScript(String)</a>)
- , whether job tasks can be executed in a <em>speculative</em> manner
- (<a href="api/org/apache/hadoop/mapred/JobConf.html#setMapSpeculativeExecution(boolean)">setMapSpeculativeExecution(boolean)</a>)/(<a href="api/org/apache/hadoop/mapred/JobConf.html#setReduceSpeculativeExecution(boolean)">setReduceSpeculativeExecution(boolean)</a>)
- , maximum number of attempts per task
- (<a href="api/org/apache/hadoop/mapred/JobConf.html#setMaxMapAttempts(int)">setMaxMapAttempts(int)</a>/<a href="api/org/apache/hadoop/mapred/JobConf.html#setMaxReduceAttempts(int)">setMaxReduceAttempts(int)</a>)
- , percentage of tasks failure which can be tolerated by the job
- (<a href="api/org/apache/hadoop/mapred/JobConf.html#setMaxMapTaskFailuresPercent(int)">setMaxMapTaskFailuresPercent(int)</a>/<a href="api/org/apache/hadoop/mapred/JobConf.html#setMaxReduceTaskFailuresPercent(int)">setMaxReduceTaskFailuresPercent(int)</a>)
- etc.</p>
- <p>Of course, users can use
- <a href="api/org/apache/hadoop/conf/Configuration.html#set(java.lang.String, java.lang.String)">set(String, String)</a>/<a href="api/org/apache/hadoop/conf/Configuration.html#get(java.lang.String, java.lang.String)">get(String, String)</a>
- to set/get arbitrary parameters needed by applications. However, use the
- <span class="codefrag">DistributedCache</span> for large amounts of (read-only) data.</p>
- <a name="N10866"></a><a name="Task+Execution+%26+Environment"></a>
- <h3 class="h4">Task Execution & Environment</h3>
- <p>The <span class="codefrag">TaskTracker</span> executes the <span class="codefrag">Mapper</span>/
- <span class="codefrag">Reducer</span> <em>task</em> as a child process in a separate jvm.
- </p>
- <p>The child-task inherits the environment of the parent
- <span class="codefrag">TaskTracker</span>. The user can specify additional options to the
- child-jvm via the <span class="codefrag">mapred.child.java.opts</span> configuration
- parameter in the <span class="codefrag">JobConf</span> such as non-standard paths for the
- run-time linker to search shared libraries via
- <span class="codefrag">-Djava.library.path=<></span> etc. If the
- <span class="codefrag">mapred.child.java.opts</span> contains the symbol <em>@taskid@</em>
- it is interpolated with the value of the <span class="codefrag">taskid</span> of the map/reduce
- task.</p>
- <p>Here is an example with multiple arguments and substitutions,
- showing jvm GC logging and the start of a passwordless JVM JMX agent,
- so that jconsole and the like can connect to watch child memory and
- threads and get thread dumps. It also sets the maximum heap-size of the
- child jvm to 512MB and adds an additional path to the
- <span class="codefrag">java.library.path</span> of the child-jvm.</p>
- <p>
-
- <span class="codefrag"><property></span>
- <br>
- <span class="codefrag"><name>mapred.child.java.opts</name></span>
- <br>
- <span class="codefrag"><value></span>
- <br>
- <span class="codefrag">
- -Xmx512M -Djava.library.path=/home/mycompany/lib
- -verbose:gc -Xloggc:/tmp/@taskid@.gc</span>
- <br>
- <span class="codefrag">
- -Dcom.sun.management.jmxremote.authenticate=false
- -Dcom.sun.management.jmxremote.ssl=false</span>
- <br>
- <span class="codefrag"></value></span>
- <br>
-
- <span class="codefrag"></property></span>
-
- </p>
- <a name="N108B7"></a><a name="Memory+management"></a>
- <h4> Memory management</h4>
- <p>Users/admins can also specify the maximum virtual memory
- of the launched child-task, and any sub-process it launches
- recursively, using <span class="codefrag">mapred.child.ulimit</span>. Note that
- the value set here is a per-process limit.
- The value for <span class="codefrag">mapred.child.ulimit</span> should be specified
- in kilobytes (KB). Also, the value must be greater than
- or equal to the -Xmx passed to the JavaVM, or else the VM might not start.
- </p>
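- <p>For example, with a child heap of <span class="codefrag">-Xmx512M</span>
- as above, a limit of 1048576 KB (1GB) comfortably exceeds the heap:</p>
- <p>
-
- <span class="codefrag"><property></span>
- <br>
- <span class="codefrag"><name>mapred.child.ulimit</name></span>
- <br>
- <span class="codefrag"><value>1048576</value></span>
- <br>
-
- <span class="codefrag"></property></span>
-
- </p>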
- <p>Note: <span class="codefrag">mapred.child.java.opts</span> is used only for
- configuring the child tasks launched by the task tracker. Configuring
- the memory options for daemons is documented in
- <a href="cluster_setup.html#Configuring+the+Environment+of+the+Hadoop+Daemons">
- cluster_setup.html</a>.
- </p>
- <p>The memory available to some parts of the framework is also
- configurable. In map and reduce tasks, performance may be influenced
- by adjusting parameters that affect the concurrency of operations and
- the frequency with which data will hit disk. Monitoring the filesystem
- counters for a job, particularly relative to byte counts from the map
- and into the reduce, is invaluable to the tuning of these
- parameters.</p>
- <a name="N108D3"></a><a name="Map+Parameters"></a>
- <h4>Map Parameters</h4>
- <p>A record emitted from a map will be serialized into a buffer and
- metadata will be stored into accounting buffers. As described in the
- following options, when either the serialization buffer or the
- metadata exceed a threshold, the contents of the buffers will be
- sorted and written to disk in the background while the map continues
- to output records. If either buffer fills completely while the spill
- is in progress, the map thread will block. When the map is finished,
- any remaining records are written to disk and all on-disk segments
- are merged into a single file. Minimizing the number of spills to
- disk can decrease map time, but a larger buffer also decreases the
- memory available to the mapper.</p>
- <table class="ForrestTable" cellspacing="1" cellpadding="4">
-
- <tr>
- <th colspan="1" rowspan="1">Name</th><th colspan="1" rowspan="1">Type</th><th colspan="1" rowspan="1">Description</th>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">io.sort.mb</td><td colspan="1" rowspan="1">int</td>
- <td colspan="1" rowspan="1">The cumulative size of the serialization and accounting
- buffers storing records emitted from the map, in megabytes.
- </td>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">io.sort.record.percent</td><td colspan="1" rowspan="1">float</td>
- <td colspan="1" rowspan="1">The ratio of serialization to accounting space can be
- adjusted. Each serialized record requires 16 bytes of
- accounting information in addition to its serialized size to
- effect the sort. This percentage of space allocated from
- <span class="codefrag">io.sort.mb</span> affects the probability of a spill to
- disk being caused by either exhaustion of the serialization
- buffer or the accounting space. Clearly, for a map outputting
- small records, a higher value than the default will likely
- decrease the number of spills to disk.</td>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">io.sort.spill.percent</td><td colspan="1" rowspan="1">float</td>
- <td colspan="1" rowspan="1">This is the threshold for the accounting and serialization
- buffers. When this percentage of either buffer has filled,
- their contents will be spilled to disk in the background. Let
- <span class="codefrag">io.sort.record.percent</span> be <em>r</em>,
- <span class="codefrag">io.sort.mb</span> be <em>x</em>, and this value be
- <em>q</em>. The maximum number of records collected before the
- collection thread will spill is <span class="codefrag">r * x * q * 2^16</span>.
- Note that a higher value may decrease the number of merges, or even
- eliminate them, but will also increase the probability of
- the map task getting blocked. The lowest average map times are
- usually obtained by accurately estimating the size of the map
- output and preventing multiple spills.</td>
- </tr>
-
- </table>
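- <p>As a worked example with illustrative values: with
- <span class="codefrag">io.sort.mb</span> = 100,
- <span class="codefrag">io.sort.record.percent</span> = 0.05 and
- <span class="codefrag">io.sort.spill.percent</span> = 0.80, the accounting
- space holds 0.05 * 100MB / 16 bytes = 327,680 record slots, and the
- collection thread will start a spill after
- 0.05 * 100 * 0.80 * 2^16 = 262,144 records (or earlier, if the
- serialization buffer crosses its own threshold first).</p>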
- <p>Other notes</p>
- <ul>
-
- <li>If either spill threshold is exceeded while a spill is in
- progress, collection will continue until the spill is finished.
- For example, if <span class="codefrag">io.sort.spill.percent</span> is set
- to 0.33, and the remainder of the buffer is filled while the spill
- runs, the next spill will include all the collected records, or
- 0.66 of the buffer, and will not generate additional spills. In
- other words, the thresholds define triggers, not blocking
- limits.</li>
-
- <li>A record larger than the serialization buffer will first
- trigger a spill, then be spilled to a separate file. It is
- undefined whether or not this record will first pass through the
- combiner.</li>
-
- </ul>
- <a name="N1093F"></a><a name="Shuffle%2FReduce+Parameters"></a>
- <h4>Shuffle/Reduce Parameters</h4>
- <p>As described previously, each reduce fetches the output assigned
- to it by the Partitioner via HTTP into memory and periodically
- merges these outputs to disk. If intermediate compression of map
- outputs is turned on, each output is decompressed into memory. The
- following options affect the frequency of these merges to disk prior
- to the reduce and the memory allocated to map output during the
- reduce.</p>
- <table class="ForrestTable" cellspacing="1" cellpadding="4">
-
- <tr>
- <th colspan="1" rowspan="1">Name</th><th colspan="1" rowspan="1">Type</th><th colspan="1" rowspan="1">Description</th>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">io.sort.factor</td><td colspan="1" rowspan="1">int</td>
- <td colspan="1" rowspan="1">Specifies the number of segments on disk to be merged at
- the same time. It limits the number of open files and
- compression codecs during the merge. If the number of files
- exceeds this limit, the merge will proceed in several passes.
- Though this limit also applies to the map, most jobs should be
- configured so that hitting this limit is unlikely
- there.</td>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">mapred.inmem.merge.threshold</td><td colspan="1" rowspan="1">int</td>
- <td colspan="1" rowspan="1">The number of sorted map outputs fetched into memory
- before being merged to disk. Like the spill thresholds in the
- preceding note, this is not defining a unit of partition, but
- a trigger. In practice, this is usually set very high (1000)
- or disabled (0), since merging in-memory segments is often
- less expensive than merging from disk (see notes following
- this table). This threshold influences only the frequency of
- in-memory merges during the shuffle.</td>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">mapred.job.shuffle.merge.percent</td><td colspan="1" rowspan="1">float</td>
- <td colspan="1" rowspan="1">The memory threshold for fetched map outputs before an
- in-memory merge is started, expressed as a percentage of
- memory allocated to storing map outputs in memory. Since map
- outputs that can't fit in memory can be stalled, setting this
- high may decrease parallelism between the fetch and merge.
- Conversely, values as high as 1.0 have been effective for
- reduces whose input can fit entirely in memory. This parameter
- influences only the frequency of in-memory merges during the
- shuffle.</td>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">mapred.job.shuffle.input.buffer.percent</td><td colspan="1" rowspan="1">float</td>
- <td colspan="1" rowspan="1">The percentage of memory- relative to the maximum heapsize
- as typically specified in <span class="codefrag">mapred.child.java.opts</span>-
- that can be allocated to storing map outputs during the
- shuffle. Though some memory should be set aside for the
- framework, in general it is advantageous to set this high
- enough to store large and numerous map outputs.</td>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">mapred.job.reduce.input.buffer.percent</td><td colspan="1" rowspan="1">float</td>
- <td colspan="1" rowspan="1">The percentage of memory relative to the maximum heapsize
- in which map outputs may be retained during the reduce. When
- the reduce begins, map outputs will be merged to disk until
- those that remain are under the resource limit this defines.
- By default, all map outputs are merged to disk before the
- reduce begins to maximize the memory available to the reduce.
- For less memory-intensive reduces, this should be increased to
- avoid trips to disk.</td>
- </tr>
-
- </table>
- <p>Other notes</p>
- <ul>
-
- <li>If a map output is larger than 25 percent of the memory
- allocated to copying map outputs, it will be written directly to
- disk without first staging through memory.</li>
-
- <li>When running with a combiner, the reasoning about high merge
- thresholds and large buffers may not hold. For merges started
- before all map outputs have been fetched, the combiner is run
- while spilling to disk. In some cases, one can obtain better
- reduce times by spending resources combining map outputs (making
- disk spills small and parallelizing spilling and fetching) rather
- than aggressively increasing buffer sizes.</li>
-
- <li>When merging in-memory map outputs to disk to begin the
- reduce, if an intermediate merge is necessary because there are
- segments to spill and at least <span class="codefrag">io.sort.factor</span>
- segments already on disk, the in-memory map outputs will be part
- of the intermediate merge.</li>
-
- </ul>
- <a name="N109BA"></a><a name="Directory+Structure"></a>
- <h4> Directory Structure </h4>
- <p>The task tracker has a local directory,
- <span class="codefrag"> ${mapred.local.dir}/taskTracker/</span>, in which to create
- the localized cache and localized job. It can define multiple local
- directories (spanning multiple disks), in which case each filename is
- assigned to a semi-random local directory. When the job starts, the
- task tracker creates a localized job directory relative to the local
- directory specified in the configuration. Thus the task tracker
- directory structure looks as follows: </p>
- <ul>
-
- <li>
- <span class="codefrag">${mapred.local.dir}/taskTracker/archive/</span> :
- The distributed cache. This directory holds the localized distributed
- cache. Thus localized distributed cache is shared among all
- the tasks and jobs </li>
-
- <li>
- <span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/</span> :
- The localized job directory
- <ul>
-
- <li>
- <span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/work/</span>
- : The job-specific shared directory. The tasks can use this space as
- scratch space and share files among them. This directory is exposed
- to the users through the configuration property
- <span class="codefrag">job.local.dir</span>. The directory can accessed through
- api <a href="api/org/apache/hadoop/mapred/JobConf.html#getJobLocalDir()">
- JobConf.getJobLocalDir()</a>. It is available as System property also.
- So, users (streaming etc.) can call
- <span class="codefrag">System.getProperty("job.local.dir")</span> to access the
- directory.</li>
-
- <li>
- <span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/jars/</span>
- : The jars directory, which has the job jar file and expanded jar.
- The <span class="codefrag">job.jar</span> is the application's jar file that is
- automatically distributed to each machine. It is expanded in jars
- directory before the tasks for the job start. The job.jar location
- is accessible to the application through the api
- <a href="api/org/apache/hadoop/mapred/JobConf.html#getJar()">
- JobConf.getJar() </a>. To access the unjarred directory,
- JobConf.getJar().getParent() can be called.</li>
-
- <li>
- <span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/job.xml</span>
- : The job.xml file, the generic job configuration, localized for
- the job. </li>
-
- <li>
- <span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/$taskid</span>
- : The task directory for each task attempt. Each task directory
- again has the following structure :
- <ul>
-
- <li>
- <span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/$taskid/job.xml</span>
- : A job.xml file, the task-localized job configuration. Task localization
- means that properties have been set that are specific to
- this particular task within the job. The properties localized for
- each task are described below.</li>
-
- <li>
- <span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/$taskid/output</span>
- : A directory for intermediate output files. This contains the
- temporary map/reduce data generated by the framework,
- such as map output files. </li>
-
- <li>
- <span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/$taskid/work</span>
- : The current working directory of the task.
- With <a href="#Task+JVM+Reuse">jvm reuse</a> enabled for tasks, this
- is the directory in which the jvm was started.</li>
-
- <li>
- <span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/$taskid/work/tmp</span>
- : The temporary directory for the task.
- (The user can specify the property <span class="codefrag">mapred.child.tmp</span> to set
- the value of the temporary directory for map and reduce tasks. This
- defaults to <span class="codefrag">./tmp</span>. If the value is not an absolute path,
- it is prepended with the task's working directory; otherwise, it is
- used directly. The directory will be created if it doesn't exist.
- The child java tasks are then executed with the option
- <span class="codefrag">-Djava.io.tmpdir='the absolute path of the tmp dir'</span>,
- and pipes and streaming are given the environment variable
- <span class="codefrag">TMPDIR='the absolute path of the tmp dir'</span>.) This
- directory is created only if <span class="codefrag">mapred.child.tmp</span> has a
- relative value such as <span class="codefrag">./tmp</span>.
- </li>
-
- </ul>
-
- </li>
-
- </ul>
-
- </li>
-
- </ul>
- <a name="N10A29"></a><a name="Task+JVM+Reuse"></a>
- <h4>Task JVM Reuse</h4>
- <p>Jobs can enable task JVMs to be reused by specifying the job
- configuration <span class="codefrag">mapred.job.reuse.jvm.num.tasks</span>. If the
- value is 1 (the default), then JVMs are not reused
- (i.e. 1 task per JVM). If it is -1, there is no limit to the number
- of tasks a JVM can run (of the same job). One can also specify some
- value greater than 1 using the api
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setNumTasksToExecutePerJvm(int)">
- JobConf.setNumTasksToExecutePerJvm(int)</a>.
- </p>
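- <p>For example, to let each JVM run an unlimited number of tasks of
- the same job:</p>
- <pre class="code">
- // -1 means no limit on the number of (same-job) tasks per JVM.
- conf.setNumTasksToExecutePerJvm(-1);
- </pre>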
- <p>The following properties are localized in the job configuration
- for each task's execution: </p>
- <table class="ForrestTable" cellspacing="1" cellpadding="4">
-
- <tr>
- <th colspan="1" rowspan="1">Name</th><th colspan="1" rowspan="1">Type</th><th colspan="1" rowspan="1">Description</th>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">mapred.job.id</td><td colspan="1" rowspan="1">String</td><td colspan="1" rowspan="1">The job id</td>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">mapred.jar</td><td colspan="1" rowspan="1">String</td>
- <td colspan="1" rowspan="1">job.jar location in job directory</td>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">job.local.dir</td><td colspan="1" rowspan="1"> String</td>
- <td colspan="1" rowspan="1"> The job specific shared scratch space</td>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">mapred.tip.id</td><td colspan="1" rowspan="1"> String</td>
- <td colspan="1" rowspan="1"> The task id</td>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">mapred.task.id</td><td colspan="1" rowspan="1"> String</td>
- <td colspan="1" rowspan="1"> The task attempt id</td>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">mapred.task.is.map</td><td colspan="1" rowspan="1"> boolean </td>
- <td colspan="1" rowspan="1">Is this a map task</td>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">mapred.task.partition</td><td colspan="1" rowspan="1"> int </td>
- <td colspan="1" rowspan="1">The id of the task within the job</td>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">map.input.file</td><td colspan="1" rowspan="1"> String</td>
- <td colspan="1" rowspan="1"> The filename that the map is reading from</td>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">map.input.start</td><td colspan="1" rowspan="1"> long</td>
- <td colspan="1" rowspan="1"> The offset of the start of the map input split</td>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">map.input.length </td><td colspan="1" rowspan="1">long </td>
- <td colspan="1" rowspan="1">The number of bytes in the map input split</td>
- </tr>
-
- <tr>
- <td colspan="1" rowspan="1">mapred.work.output.dir</td><td colspan="1" rowspan="1"> String </td>
- <td colspan="1" rowspan="1">The task's temporary output directory</td>
- </tr>
-
- </table>
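- <p>A task can read these localized properties from its configuration;
- for example, a <span class="codefrag">Mapper</span> might record which input
- file it is processing (a sketch):</p>
- <pre class="code">
- private String inputFile;
-
- public void configure(JobConf job) {
-   // Localized by the framework for each map task:
-   inputFile = job.get("map.input.file");
- }
- </pre>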
- <p>The standard output (stdout) and error (stderr) streams of the task
- are read by the TaskTracker and logged to
- <span class="codefrag">${HADOOP_LOG_DIR}/userlogs</span>
- </p>
- <p>The <a href="#DistributedCache">DistributedCache</a> can also be used
- to distribute both jars and native libraries for use in the map
- and/or reduce tasks. The child-jvm always has its
- <em>current working directory</em> added to the
- <span class="codefrag">java.library.path</span> and <span class="codefrag">LD_LIBRARY_PATH</span>.
- And hence the cached libraries can be loaded via
- <a href="http://java.sun.com/javase/6/docs/api/java/lang/System.html#loadLibrary(java.lang.String)">
- System.loadLibrary</a> or
- <a href="http://java.sun.com/javase/6/docs/api/java/lang/System.html#load(java.lang.String)">
- System.load</a>. More details on how to load shared libraries through
- distributed cache are documented at
- <a href="native_libraries.html#Loading+native+libraries+through+DistributedCache">
- native_libraries.html</a>.
- </p>
- <a name="N10B12"></a><a name="Job+Submission+and+Monitoring"></a>
- <h3 class="h4">Job Submission and Monitoring</h3>
- <p>
- <a href="api/org/apache/hadoop/mapred/JobClient.html">
- JobClient</a> is the primary interface by which user-job interacts
- with the <span class="codefrag">JobTracker</span>.</p>
- <p>
- <span class="codefrag">JobClient</span> provides facilities to submit jobs, track their
- progress, access component-tasks' reports and logs, get the Map/Reduce
- cluster's status information and so on.</p>
- <p>The job submission process involves:</p>
- <ol>
-
- <li>Checking the input and output specifications of the job.</li>
-
- <li>Computing the <span class="codefrag">InputSplit</span> values for the job.</li>
-
- <li>
- Setting up the requisite accounting information for the
- <span class="codefrag">DistributedCache</span> of the job, if necessary.
- </li>
-
- <li>
- Copying the job's jar and configuration to the Map/Reduce system
- directory on the <span class="codefrag">FileSystem</span>.
- </li>
-
- <li>
- Submitting the job to the <span class="codefrag">JobTracker</span> and optionally
- monitoring its status.
- </li>
-
- </ol>
- <p> Job history files are also logged to the user-specified directory
- <span class="codefrag">hadoop.job.history.user.location</span>,
- which defaults to the job output directory. The files are stored in
- "_logs/history/" in the specified directory. Hence, by default they
- will be in mapred.output.dir/_logs/history. The user can stop
- logging by giving the value <span class="codefrag">none</span> for
- <span class="codefrag">hadoop.job.history.user.location</span>.
- </p>
- <p> The user can view a summary of the history logs in the specified
- directory using the following command <br>
-
- <span class="codefrag">$ bin/hadoop job -history output-dir</span>
- <br>
- This command will print job details, and failed and killed tip
- details. <br>
- More details about the job, such as successful tasks and the
- task attempts made for each task, can be viewed using the
- following command <br>
-
- <span class="codefrag">$ bin/hadoop job -history all output-dir</span>
- <br>
- </p>
- <p> User can use
- <a href="api/org/apache/hadoop/mapred/OutputLogFilter.html">OutputLogFilter</a>
- to filter log files from the output directory listing. </p>
- <p>Normally the user creates the application, describes various facets
- of the job via <span class="codefrag">JobConf</span>, and then uses the
- <span class="codefrag">JobClient</span> to submit the job and monitor its progress.</p>
- <a name="N10B72"></a><a name="Job+Control"></a>
- <h4>Job Control</h4>
- <p>Users may need to chain Map/Reduce jobs to accomplish complex
- tasks which cannot be done via a single Map/Reduce job. This is fairly
- easy since the output of a job typically goes to the distributed
- file-system, and that output, in turn, can be used as the input for the
- next job.</p>
- <p>However, this also means that the onus of ensuring jobs are
- complete (success/failure) lies squarely on the clients. In such
- cases, the various job-control options (see the sketch following
- this list) are:</p>
- <ul>
-
- <li>
-
- <a href="api/org/apache/hadoop/mapred/JobClient.html#runJob(org.apache.hadoop.mapred.JobConf)">
- runJob(JobConf)</a> : Submits the job and returns only after the
- job has completed.
- </li>
-
- <li>
-
- <a href="api/org/apache/hadoop/mapred/JobClient.html#submitJob(org.apache.hadoop.mapred.JobConf)">
- submitJob(JobConf)</a> : Only submits the job; the caller then polls the
- returned handle to the
- <a href="api/org/apache/hadoop/mapred/RunningJob.html">
- RunningJob</a> to query status and make scheduling decisions.
- </li>
-
- <li>
-
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setJobEndNotificationURI(java.lang.String)">
- JobConf.setJobEndNotificationURI(String)</a> : Sets up a
- notification upon job-completion, thus avoiding polling.
- </li>
-
- </ul>
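- <p>A minimal sketch of chaining two jobs with
- <span class="codefrag">runJob</span>, where the first job's output path feeds
- the second job's input (the rest of the per-job configuration is
- elided and the intermediate path is an arbitrary example):</p>
- <pre class="code">
- // Blocks until job1 completes; throws IOException if it fails.
- JobClient.runJob(job1Conf);
-
- // job2 reads what job1 wrote.
- FileInputFormat.setInputPaths(job2Conf, new Path("intermediate/"));
- JobClient.runJob(job2Conf);
- </pre>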
- <a name="N10B9C"></a><a name="Job+Input"></a>
- <h3 class="h4">Job Input</h3>
- <p>
- <a href="api/org/apache/hadoop/mapred/InputFormat.html">
- InputFormat</a> describes the input-specification for a Map/Reduce job.
- </p>
- <p>The Map/Reduce framework relies on the <span class="codefrag">InputFormat</span> of
- the job to:</p>
- <ol>
-
- <li>Validate the input-specification of the job.</li>
-
- <li>
- Split up the input file(s) into logical <span class="codefrag">InputSplit</span>
- instances, each of which is then assigned to an individual
- <span class="codefrag">Mapper</span>.
- </li>
-
- <li>
- Provide the <span class="codefrag">RecordReader</span> implementation used to
- glean input records from the logical <span class="codefrag">InputSplit</span> for
- processing by the <span class="codefrag">Mapper</span>.
- </li>
-
- </ol>
- <p>The default behavior of file-based <span class="codefrag">InputFormat</span>
- implementations, typically sub-classes of
- <a href="api/org/apache/hadoop/mapred/FileInputFormat.html">
- FileInputFormat</a>, is to split the input into <em>logical</em>
- <span class="codefrag">InputSplit</span> instances based on the total size, in bytes, of
- the input files. However, the <span class="codefrag">FileSystem</span> blocksize of the
- input files is treated as an upper bound for input splits. A lower bound
- on the split size can be set via <span class="codefrag">mapred.min.split.size</span>.</p>
- <p>Clearly, logical splits based on input size are insufficient for many
- applications since record boundaries must be respected. In such cases,
- the application should implement a <span class="codefrag">RecordReader</span>, which is
- responsible for respecting record-boundaries and presenting a
- record-oriented view of the logical <span class="codefrag">InputSplit</span> to the
- individual task.</p>
- <p>
- <a href="api/org/apache/hadoop/mapred/TextInputFormat.html">
- TextInputFormat</a> is the default <span class="codefrag">InputFormat</span>.</p>
- <p>If <span class="codefrag">TextInputFormat</span> is the <span class="codefrag">InputFormat</span> for a
- given job, the framework detects input-files with the <em>.gz</em> and
- <em>.lzo</em> extensions and automatically decompresses them using the
- appropriate <span class="codefrag">CompressionCodec</span>. However, it must be noted that
- compressed files with the above extensions cannot be <em>split</em> and
- each compressed file is processed in its entirety by a single mapper.</p>
- <a name="N10C06"></a><a name="InputSplit"></a>
- <h4>InputSplit</h4>
- <p>
- <a href="api/org/apache/hadoop/mapred/InputSplit.html">
- InputSplit</a> represents the data to be processed by an individual
- <span class="codefrag">Mapper</span>.</p>
- <p>Typically <span class="codefrag">InputSplit</span> presents a byte-oriented view of
- the input, and it is the responsibility of <span class="codefrag">RecordReader</span>
- to process and present a record-oriented view.</p>
- <p>
- <a href="api/org/apache/hadoop/mapred/FileSplit.html">
- FileSplit</a> is the default <span class="codefrag">InputSplit</span>. It sets
- <span class="codefrag">map.input.file</span> to the path of the input file for the
- logical split.</p>
- <a name="N10C2B"></a><a name="RecordReader"></a>
- <h4>RecordReader</h4>
- <p>
- <a href="api/org/apache/hadoop/mapred/RecordReader.html">
- RecordReader</a> reads <span class="codefrag"><key, value></span> pairs from an
- <span class="codefrag">InputSplit</span>.</p>
- <p>Typically the <span class="codefrag">RecordReader</span> converts the byte-oriented
- view of the input, provided by the <span class="codefrag">InputSplit</span>, and
- presents a record-oriented view to the <span class="codefrag">Mapper</span> implementations
- for processing. <span class="codefrag">RecordReader</span> thus assumes the
- responsibility of processing record boundaries and presents the tasks
- with keys and values.</p>
- <a name="N10C4E"></a><a name="Job+Output"></a>
- <h3 class="h4">Job Output</h3>
- <p>
- <a href="api/org/apache/hadoop/mapred/OutputFormat.html">
- OutputFormat</a> describes the output-specification for a Map/Reduce
- job.</p>
- <p>The Map/Reduce framework relies on the <span class="codefrag">OutputFormat</span> of
- the job to:</p>
- <ol>
-
- <li>
- Validate the output-specification of the job; for example, check that
- the output directory doesn't already exist.
- </li>
-
- <li>
- Provide the <span class="codefrag">RecordWriter</span> implementation used to
- write the output files of the job. Output files are stored in a
- <span class="codefrag">FileSystem</span>.
- </li>
-
- </ol>
- <p>
- <span class="codefrag">TextOutputFormat</span> is the default
- <span class="codefrag">OutputFormat</span>.</p>
- <a name="N10C77"></a><a name="OutputCommitter"></a>
- <h4>OutputCommitter</h4>
- <p>
- <a href="api/org/apache/hadoop/mapred/OutputCommitter.html">
- OutputCommitter</a> describes the commit of task output for a
- Map/Reduce job.</p>
- <p>The Map/Reduce framework relies on the <span class="codefrag">OutputCommitter</span>
- of the job to:</p>
- <ol>
-
- <li>
- Set up the job during initialization. For example, create
- the temporary output directory for the job during the
- initialization of the job.
- Job setup is done by a separate task when the job is
- in PREP state and after initializing tasks. Once the setup task
- completes, the job will be moved to RUNNING state.
- </li>
-
- <li>
- Clean up the job after its completion. For example, remove the
- temporary output directory after the job completes.
- Job cleanup is done by a separate task at the end of the job.
- The job is declared SUCCEEDED/FAILED/KILLED after the cleanup
- task completes.
- </li>
-
- <li>
- Set up the task's temporary output.
- Task setup is done as part of the same task, during task initialization.
- </li>
-
- <li>
- Check whether a task needs a commit. This avoids the commit
- procedure for a task that does not need it.
- </li>
-
- <li>
- Commit the task output.
- Once the task is done, it will commit its output if required.
- </li>
-
- <li>
- Discard the task commit.
- If the task has failed or been killed, the output will be cleaned up.
- If the task could not clean up (in an exception block), a separate task
- will be launched with the same attempt-id to do the cleanup.
- </li>
-
- </ol>
- <p>
- <span class="codefrag">FileOutputCommitter</span> is the default
- <span class="codefrag">OutputCommitter</span>. Job setup/cleanup tasks occupy
- map or reduce slots, whichever is free on the TaskTracker. The
- JobCleanup task, TaskCleanup tasks and the JobSetup task have the highest
- priority, in that order.</p>
- <a name="N10CA7"></a><a name="Task+Side-Effect+Files"></a>
- <h4>Task Side-Effect Files</h4>
- <p>In some applications, component tasks need to create and/or write to
- side-files, which differ from the actual job-output files.</p>
- <p>In such cases there could be issues with two instances of the same
- <span class="codefrag">Mapper</span> or <span class="codefrag">Reducer</span> running simultaneously (for
- example, speculative tasks) trying to open and/or write to the same
- file (path) on the <span class="codefrag">FileSystem</span>. Hence the
- application-writer will have to pick unique names per task-attempt
- (using the attemptid, say <span class="codefrag">attempt_200709221812_0001_m_000000_0</span>),
- not just per task.</p>
- <p>To avoid these issues the Map/Reduce framework, when the
- <span class="codefrag">OutputCommitter</span> is <span class="codefrag">FileOutputCommitter</span>,
- maintains a special
- <span class="codefrag">${mapred.output.dir}/_temporary/_${taskid}</span> sub-directory
- accessible via <span class="codefrag">${mapred.work.output.dir}</span>
- for each task-attempt on the <span class="codefrag">FileSystem</span> where the output
- of the task-attempt is stored. On successful completion of the
- task-attempt, the files in the
- <span class="codefrag">${mapred.output.dir}/_temporary/_${taskid}</span> (only)
- are <em>promoted</em> to <span class="codefrag">${mapred.output.dir}</span>. Of course,
- the framework discards the sub-directory of unsuccessful task-attempts.
- This process is completely transparent to the application.</p>
- <p>The application-writer can take advantage of this feature by
- creating any side-files required in <span class="codefrag">${mapred.work.output.dir}</span>
- during execution of a task via
- <a href="api/org/apache/hadoop/mapred/FileOutputFormat.html#getWorkOutputPath(org.apache.hadoop.mapred.JobConf)">
- FileOutputFormat.getWorkOutputPath()</a>, and the framework will promote them
- similarly for successful task-attempts, thus eliminating the need to
- pick unique paths per task-attempt.</p>
- <p>Note: The value of <span class="codefrag">${mapred.work.output.dir}</span> during
- execution of a particular task-attempt is actually
- <span class="codefrag">${mapred.output.dir}/_temporary/_{$taskid}</span>, and this value is
- set by the Map/Reduce framework. So, just create any side-files in the
- path returned by
- <a href="api/org/apache/hadoop/mapred/FileOutputFormat.html#getWorkOutputPath(org.apache.hadoop.mapred.JobConf)">
- FileOutputFormat.getWorkOutputPath() </a>from map/reduce
- task to take advantage of this feature.</p>
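- <p>For example, a task might create a side-file next to its regular
- output as follows (a sketch, where <span class="codefrag">job</span> is the
- task's <span class="codefrag">JobConf</span>):</p>
- <pre class="code">
- Path workDir = FileOutputFormat.getWorkOutputPath(job);
- FileSystem fs = workDir.getFileSystem(job);
- // Promoted to ${mapred.output.dir} only if this attempt succeeds:
- FSDataOutputStream side = fs.create(new Path(workDir, "side-file"));
- </pre>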
- <p>The entire discussion holds true for maps of jobs with
- reducer=NONE (i.e. 0 reduces) since output of the map, in that case,
- goes directly to HDFS.</p>
- <a name="N10CF5"></a><a name="RecordWriter"></a>
- <h4>RecordWriter</h4>
- <p>
- <a href="api/org/apache/hadoop/mapred/RecordWriter.html">
- RecordWriter</a> writes the output <span class="codefrag"><key, value></span>
- pairs to an output file.</p>
- <p>RecordWriter implementations write the job outputs to the
- <span class="codefrag">FileSystem</span>.</p>
- <a name="N10D0C"></a><a name="Other+Useful+Features"></a>
- <h3 class="h4">Other Useful Features</h3>
- <a name="N10D12"></a><a name="Submitting+Jobs+to+Queues"></a>
- <h4>Submitting Jobs to Queues</h4>
- <p>Users submit jobs to queues. Queues, as collections of jobs,
- allow the system to provide specific functionality. For example,
- queues use ACLs to control which users
- can submit jobs to them. Queues are expected to be used primarily
- by Hadoop Schedulers. </p>
- <p>Hadoop comes configured with a single mandatory queue, called
- 'default'. Queue names are defined in the
- <span class="codefrag">mapred.queue.names</span> property of the Hadoop site
- configuration. Some job schedulers, such as the
- <a href="capacity_scheduler.html">Capacity Scheduler</a>,
- support multiple queues.</p>
- <p>A job defines the queue it needs to be submitted to through the
- <span class="codefrag">mapred.job.queue.name</span> property, or through the
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setQueueName(java.lang.String)">setQueueName(String)</a>
- API. Setting the queue name is optional. If a job is submitted
- without an associated queue name, it is submitted to the 'default'
- queue.</p>
- <a name="N10D30"></a><a name="Counters"></a>
- <h4>Counters</h4>
- <p>
- <span class="codefrag">Counters</span> represent global counters, defined either by
- the Map/Reduce framework or applications. Each <span class="codefrag">Counter</span> can
- be of any <span class="codefrag">Enum</span> type. Counters of a particular
- <span class="codefrag">Enum</span> are bunched into groups of type
- <span class="codefrag">Counters.Group</span>.</p>
- <p>Applications can define arbitrary <span class="codefrag">Counters</span> (of type
- <span class="codefrag">Enum</span>) and update them via
- <a href="api/org/apache/hadoop/mapred/Reporter.html#incrCounter(java.lang.Enum, long)">
- Reporter.incrCounter(Enum, long)</a> or
- <a href="api/org/apache/hadoop/mapred/Reporter.html#incrCounter(java.lang.String, java.lang.String, long amount)">
- Reporter.incrCounter(String, String, long)</a>
- in the <span class="codefrag">map</span> and/or
- <span class="codefrag">reduce</span> methods. These counters are then globally
- aggregated by the framework.</p>
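- <p>For instance, an application-defined <span class="codefrag">Enum</span>
- counter might be declared and updated as follows (a sketch):</p>
- <pre class="code">
- // Declared in the Mapper/Reducer class:
- static enum RecordCounters { PROCESSED, MALFORMED }
-
- // Updated inside map()/reduce():
- reporter.incrCounter(RecordCounters.PROCESSED, 1);
- if (malformed) {
-   reporter.incrCounter(RecordCounters.MALFORMED, 1);
- }
- </pre>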
- <a name="N10D5F"></a><a name="DistributedCache"></a>
- <h4>DistributedCache</h4>
- <p>
- <a href="api/org/apache/hadoop/filecache/DistributedCache.html">
- DistributedCache</a> distributes application-specific, large, read-only
- files efficiently.</p>
- <p>
- <span class="codefrag">DistributedCache</span> is a facility provided by the
- Map/Reduce framework to cache files (text, archives, jars and so on)
- needed by applications.</p>
- <p>Applications specify the files to be cached via urls (hdfs://)
- in the <span class="codefrag">JobConf</span>. The <span class="codefrag">DistributedCache</span>
- assumes that the files specified via hdfs:// urls are already present
- on the <span class="codefrag">FileSystem</span>.</p>
- <p>The framework will copy the necessary files to the slave node
- before any tasks for the job are executed on that node. Its
- efficiency stems from the fact that the files are only copied once
- per job, and from the ability to cache archives, which are un-archived on
- the slaves.</p>
- <p>
- <span class="codefrag">DistributedCache</span> tracks the modification timestamps of
- the cached files. Clearly the cache files should not be modified by
- the application or externally while the job is executing.</p>
- <p>
- <span class="codefrag">DistributedCache</span> can be used to distribute simple,
- read-only data/text files and more complex types such as archives and
- jars. Archives (zip, tar, tgz and tar.gz files) are
- <em>un-archived</em> at the slave nodes. Files
- have <em>execution permissions</em> set. </p>
- <p>The files/archives can be distributed by setting the property
- <span class="codefrag">mapred.cache.{files|archives}</span>. If more than one
- file/archive has to be distributed, they can be added as comma
- separated paths. The properties can also be set by APIs
- <a href="api/org/apache/hadoop/filecache/DistributedCache.html#addCacheFile(java.net.URI,%20org.apache.hadoop.conf.Configuration)">
- DistributedCache.addCacheFile(URI,conf)</a>/
- <a href="api/org/apache/hadoop/filecache/DistributedCache.html#addCacheArchive(java.net.URI,%20org.apache.hadoop.conf.Configuration)">
- DistributedCache.addCacheArchive(URI,conf)</a> and
- <a href="api/org/apache/hadoop/filecache/DistributedCache.html#setCacheFiles(java.net.URI[],%20org.apache.hadoop.conf.Configuration)">
- DistributedCache.setCacheFiles(URIs,conf)</a>/
- <a href="api/org/apache/hadoop/filecache/DistributedCache.html#setCacheArchives(java.net.URI[],%20org.apache.hadoop.conf.Configuration)">
- DistributedCache.setCacheArchives(URIs,conf)</a>
- where URI is of the form
- <span class="codefrag">hdfs://host:port/absolute-path#link-name</span>.
- In Streaming, the files can be distributed through command line
- option <span class="codefrag">-cacheFile/-cacheArchive</span>.</p>
- <p>Optionally users can also direct the <span class="codefrag">DistributedCache</span>
- to <em>symlink</em> the cached file(s) into the <span class="codefrag">current working
- directory</span> of the task via the
- <a href="api/org/apache/hadoop/filecache/DistributedCache.html#createSymlink(org.apache.hadoop.conf.Configuration)">
- DistributedCache.createSymlink(Configuration)</a> api, or by setting
- the configuration property <span class="codefrag">mapred.create.symlink</span>
- to <span class="codefrag">yes</span>. The DistributedCache will use the
- <span class="codefrag">fragment</span> of the URI as the name of the symlink.
- For example, the URI
- <span class="codefrag">hdfs://namenode:port/lib.so.1#lib.so</span>
- will have the symlink name as <span class="codefrag">lib.so</span> in task's cwd
- for the file <span class="codefrag">lib.so.1</span> in distributed cache.</p>
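- <p>A short sketch of caching a lookup file at job-submission time and
- opening it from within a task; the paths and file names are
- illustrative:</p>
- <pre class="code">
- // At submission: distribute an HDFS file to all task nodes.
- DistributedCache.addCacheFile(
-     new URI("hdfs://namenode:port/myapp/lookup.dat#lookup.dat"), conf);
- DistributedCache.createSymlink(conf); // symlink it into the task cwd
-
- // In the task: the URI fragment resolves in the working directory.
- BufferedReader in = new BufferedReader(new FileReader("lookup.dat"));
- </pre>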
- <p>The <span class="codefrag">DistributedCache</span> can also be used as a
- rudimentary software distribution mechanism for use in the
- map and/or reduce tasks. It can be used to distribute both
- jars and native libraries. The
- <a href="api/org/apache/hadoop/filecache/DistributedCache.html#addArchiveToClassPath(org.apache.hadoop.fs.Path,%20org.apache.hadoop.conf.Configuration)">
- DistributedCache.addArchiveToClassPath(Path, Configuration)</a> or
- <a href="api/org/apache/hadoop/filecache/DistributedCache.html#addFileToClassPath(org.apache.hadoop.fs.Path,%20org.apache.hadoop.conf.Configuration)">
- DistributedCache.addFileToClassPath(Path, Configuration)</a> api
- can be used to cache files/jars and also add them to the
- <em>classpath</em> of child-jvm. The same can be done by setting
- the configuration properties
- <span class="codefrag">mapred.job.classpath.{files|archives}</span>. Similarly the
- cached files that are symlinked into the working directory of the
- task can be used to distribute native libraries and load them.</p>
- <a name="N10DE2"></a><a name="Tool"></a>
- <h4>Tool</h4>
- <p>The <a href="api/org/apache/hadoop/util/Tool.html">Tool</a>
- interface supports the handling of generic Hadoop command-line options.
- </p>
- <p>
- <span class="codefrag">Tool</span> is the standard for any Map/Reduce tool or
- application. The application should delegate the handling of
- standard command-line options to
- <a href="api/org/apache/hadoop/util/GenericOptionsParser.html">
- GenericOptionsParser</a> via
- <a href="api/org/apache/hadoop/util/ToolRunner.html#run(org.apache.hadoop.util.Tool, java.lang.String[])">
- ToolRunner.run(Tool, String[])</a> and only handle its custom
- arguments.</p>
- <p>
- The generic Hadoop command-line options are:<br>
-
- <span class="codefrag">
- -conf <configuration file>
- </span>
-
- <br>
-
- <span class="codefrag">
- -D <property=value>
- </span>
-
- <br>
-
- <span class="codefrag">
- -fs <local|namenode:port>
- </span>
-
- <br>
-
- <span class="codefrag">
- -jt <local|jobtracker:port>
- </span>
-
- </p>
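- <p>A minimal sketch of a <span class="codefrag">Tool</span>-based driver; the
- job set-up inside <span class="codefrag">run</span> is elided:</p>
- <pre class="code">
- import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.conf.Configured;
- import org.apache.hadoop.mapred.JobClient;
- import org.apache.hadoop.mapred.JobConf;
- import org.apache.hadoop.util.Tool;
- import org.apache.hadoop.util.ToolRunner;
-
- public class MyTool extends Configured implements Tool {
-   public int run(String[] args) throws Exception {
-     JobConf job = new JobConf(getConf(), MyTool.class);
-     // ... configure the job from the remaining, application-specific args ...
-     JobClient.runJob(job);
-     return 0;
-   }
-
-   public static void main(String[] args) throws Exception {
-     // ToolRunner parses the generic options before invoking run().
-     int res = ToolRunner.run(new Configuration(), new MyTool(), args);
-     System.exit(res);
-   }
- }
- </pre>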
- <a name="N10E14"></a><a name="IsolationRunner"></a>
- <h4>IsolationRunner</h4>
- <p>
- <a href="api/org/apache/hadoop/mapred/IsolationRunner.html">
- IsolationRunner</a> is a utility to help debug Map/Reduce programs.</p>
- <p>To use the <span class="codefrag">IsolationRunner</span>, first set
- <span class="codefrag">keep.failed.tasks.files</span> to <span class="codefrag">true</span>
- (also see <span class="codefrag">keep.tasks.files.pattern</span>).</p>
- <p>
- Next, go to the node on which the failed task ran and go to the
- <span class="codefrag">TaskTracker</span>'s local directory and run the
- <span class="codefrag">IsolationRunner</span>:<br>
-
- <span class="codefrag">$ cd <local path>/taskTracker/${taskid}/work</span>
- <br>
-
- <span class="codefrag">
- $ bin/hadoop org.apache.hadoop.mapred.IsolationRunner ../job.xml
- </span>
-
- </p>
- <p>
- <span class="codefrag">IsolationRunner</span> will run the failed task in a single
- jvm, which can be in the debugger, over precisely the same input.</p>
- <a name="N10E47"></a><a name="Profiling"></a>
- <h4>Profiling</h4>
- <p>Profiling is a utility to get a representative sample (2 or 3) of
- built-in java profiler output for a sample of maps and reduces. </p>
- <p>User can specify whether the system should collect profiler
- information for some of the tasks in the job by setting the
- configuration property <span class="codefrag">mapred.task.profile</span>. The
- value can be set using the api
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setProfileEnabled(boolean)">
- JobConf.setProfileEnabled(boolean)</a>. If the value is set to
- <span class="codefrag">true</span>, task profiling is enabled. The profiler
- information is stored in the user log directory. By default,
- profiling is not enabled for the job. </p>
- <p>Once the user has configured that profiling is needed, he/she can use
- the configuration property
- <span class="codefrag">mapred.task.profile.{maps|reduces}</span> to set the ranges
- of map/reduce tasks to profile. The value can be set using the api
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setProfileTaskRange(boolean,%20java.lang.String)">
- JobConf.setProfileTaskRange(boolean,String)</a>.
- By default, the specified range is <span class="codefrag">0-2</span>.</p>
- <p>User can also specify the profiler configuration arguments by
- setting the configuration property
- <span class="codefrag">mapred.task.profile.params</span>. The value can be specified
- using the api
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setProfileParams(java.lang.String)">
- JobConf.setProfileParams(String)</a>. If the string contains a
- <span class="codefrag">%s</span>, it will be replaced with the name of the profiling
- output file when the task runs. These parameters are passed to the
- task child JVM on the command line. The default value for
- the profiling parameters is
- <span class="codefrag">-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s</span>
-
- </p>
- <a name="N10E7B"></a><a name="Debugging"></a>
- <h4>Debugging</h4>
- <p>The Map/Reduce framework provides a facility to run user-provided
- scripts for debugging. When a map/reduce task fails, a user can run
- a debug script, to process task logs for example. The script is
- given access to the task's stdout and stderr outputs, syslog and
- jobconf. The output from the debug script's stdout and stderr is
- displayed in the console diagnostics and also as part of the
- job UI. </p>
- <p> In the following sections we discuss how to submit a debug script
- with a job. The script file needs to be distributed and submitted to
- the framework.</p>
- <a name="N10E87"></a><a name="How+to+distribute+the+script+file%3A"></a>
- <h5> How to distribute the script file: </h5>
- <p>
- The user needs to use
- <a href="mapred_tutorial.html#DistributedCache">DistributedCache</a>
- to <em>distribute</em> and <em>symlink</em> the script file.</p>
- <a name="N10E9B"></a><a name="How+to+submit+the+script%3A"></a>
- <h5> How to submit the script: </h5>
- <p> A quick way to submit the debug script is to set values for the
- properties <span class="codefrag">mapred.map.task.debug.script</span> and
- <span class="codefrag">mapred.reduce.task.debug.script</span>, for debugging map and
- reduce tasks respectively. These properties can also be set by using APIs
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setMapDebugScript(java.lang.String)">
- JobConf.setMapDebugScript(String) </a> and
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setReduceDebugScript(java.lang.String)">
- JobConf.setReduceDebugScript(String) </a>. In streaming mode, a debug
- script can be submitted with the command-line options
- <span class="codefrag">-mapdebug</span> and <span class="codefrag">-reducedebug</span>, for debugging
- map and reduce tasks respectively.</p>
- <p>The arguments to the script are the task's stdout, stderr,
- syslog and jobconf files. The debug command, run on the node where
- the map/reduce task failed, is: <br>
-
- <span class="codefrag"> $script $stdout $stderr $syslog $jobconf </span>
- </p>
- <p> Pipes programs have the C++ program name as a fifth argument
- for the command. Thus for pipes programs the command is <br>
-
- <span class="codefrag">$script $stdout $stderr $syslog $jobconf $program </span>
-
- </p>
- <a name="N10EC9"></a><a name="Default+Behavior%3A"></a>
- <h5> Default Behavior: </h5>
- <p> For pipes, a default script is run which processes core dumps
- under gdb, prints the stack trace and gives info about running threads. </p>
- <a name="N10ED4"></a><a name="JobControl"></a>
- <h4>JobControl</h4>
- <p>
- <a href="api/org/apache/hadoop/mapred/jobcontrol/package-summary.html">
- JobControl</a> is a utility which encapsulates a set of Map/Reduce jobs
- and their dependencies.</p>
- <a name="N10EE1"></a><a name="Data+Compression"></a>
- <h4>Data Compression</h4>
- <p>Hadoop Map/Reduce provides facilities for the application-writer to
- specify compression for both intermediate map-outputs and the
- job-outputs i.e. output of the reduces. It also comes bundled with
- <a href="api/org/apache/hadoop/io/compress/CompressionCodec.html">
- CompressionCodec</a> implementations for the
- <a href="http://www.zlib.net/">zlib</a> and <a href="http://www.oberhumer.com/opensource/lzo/">lzo</a> compression
- algorithms. The <a href="http://www.gzip.org/">gzip</a> file format is also
- supported.</p>
- <p>Hadoop also provides native implementations of the above compression
- codecs for reasons of both performance (zlib) and non-availability of
- Java libraries (lzo). More details on their usage and availability are
- available <a href="native_libraries.html">here</a>.</p>
- <a name="N10F01"></a><a name="Intermediate+Outputs"></a>
- <h5>Intermediate Outputs</h5>
- <p>Applications can control compression of intermediate map-outputs
- via the
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setCompressMapOutput(boolean)">
- JobConf.setCompressMapOutput(boolean)</a> api and the
- <span class="codefrag">CompressionCodec</span> to be used via the
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setMapOutputCompressorClass(java.lang.Class)">
- JobConf.setMapOutputCompressorClass(Class)</a> api.</p>
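- <p>For example (a fragment), compressing map outputs with the
- zlib-based <span class="codefrag">DefaultCodec</span>:</p>
- <pre class="code">
- conf.setCompressMapOutput(true);
- conf.setMapOutputCompressorClass(
-     org.apache.hadoop.io.compress.DefaultCodec.class);
- </pre>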
- <a name="N10F16"></a><a name="Job+Outputs"></a>
- <h5>Job Outputs</h5>
- <p>Applications can control compression of job-outputs via the
- <a href="api/org/apache/hadoop/mapred/FileOutputFormat.html#setCompressOutput(org.apache.hadoop.mapred.JobConf,%20boolean)">
- FileOutputFormat.setCompressOutput(JobConf, boolean)</a> api and the
- <span class="codefrag">CompressionCodec</span> to be used can be specified via the
- <a href="api/org/apache/hadoop/mapred/FileOutputFormat.html#setOutputCompressorClass(org.apache.hadoop.mapred.JobConf,%20java.lang.Class)">
- FileOutputFormat.setOutputCompressorClass(JobConf, Class)</a> api.</p>
- <p>If the job outputs are to be stored in the
- <a href="api/org/apache/hadoop/mapred/SequenceFileOutputFormat.html">
- SequenceFileOutputFormat</a>, the required
- <span class="codefrag">SequenceFile.CompressionType</span> (i.e. <span class="codefrag">RECORD</span> /
- <span class="codefrag">BLOCK</span> - defaults to <span class="codefrag">RECORD</span>) can be
- specified via the
- <a href="api/org/apache/hadoop/mapred/SequenceFileOutputFormat.html#setOutputCompressionType(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.io.SequenceFile.CompressionType)">
- SequenceFileOutputFormat.setOutputCompressionType(JobConf,
- SequenceFile.CompressionType)</a> api.</p>
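<p>Putting these together, a hedged sketch (reusing the
<span class="codefrag">conf</span> object from the previous fragment) that
writes the job-outputs as block-compressed sequence files:</p>
<pre class="code">
// Sketch: compress job-outputs; with SequenceFileOutputFormat, BLOCK
// compression usually compresses better than the default RECORD.
conf.setOutputFormat(SequenceFileOutputFormat.class);
FileOutputFormat.setCompressOutput(conf, true);
FileOutputFormat.setOutputCompressorClass(conf,
    org.apache.hadoop.io.compress.GzipCodec.class);
SequenceFileOutputFormat.setOutputCompressionType(conf,
    SequenceFile.CompressionType.BLOCK);
</pre>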
- <a name="N10F43"></a><a name="Skipping+Bad+Records"></a>
- <h4>Skipping Bad Records</h4>
<p>Hadoop provides an option to skip a certain set of bad input
records when processing map inputs. Applications
can control this feature through the
- <a href="api/org/apache/hadoop/mapred/SkipBadRecords.html">
- SkipBadRecords</a> class.</p>
<p>This feature can be used when map tasks crash deterministically
on certain input, usually due to bugs in the map function. Ordinarily
the user would fix these bugs, but that is sometimes not possible;
the bug may be in a third-party library, for example, whose source
code is not available. In such cases, the task never completes
successfully even after multiple attempts, and the job fails. With
this feature, only a small portion of data surrounding the
bad records is lost, which may be acceptable for some applications
(those performing statistical analysis on very large data, for
example). </p>
<p>By default this feature is disabled. To enable it,
- refer to <a href="api/org/apache/hadoop/mapred/SkipBadRecords.html#setMapperMaxSkipRecords(org.apache.hadoop.conf.Configuration, long)">
- SkipBadRecords.setMapperMaxSkipRecords(Configuration, long)</a> and
- <a href="api/org/apache/hadoop/mapred/SkipBadRecords.html#setReducerMaxSkipGroups(org.apache.hadoop.conf.Configuration, long)">
- SkipBadRecords.setReducerMaxSkipGroups(Configuration, long)</a>.
- </p>
- <p>With this feature enabled, the framework gets into 'skipping
- mode' after a certain number of map failures. For more details,
- see <a href="api/org/apache/hadoop/mapred/SkipBadRecords.html#setAttemptsToStartSkipping(org.apache.hadoop.conf.Configuration, int)">
- SkipBadRecords.setAttemptsToStartSkipping(Configuration, int)</a>.
- In 'skipping mode', map tasks maintain the range of records being
- processed. To do this, the framework relies on the processed record
- counter. See <a href="api/org/apache/hadoop/mapred/SkipBadRecords.html#COUNTER_MAP_PROCESSED_RECORDS">
- SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS</a> and
- <a href="api/org/apache/hadoop/mapred/SkipBadRecords.html#COUNTER_REDUCE_PROCESSED_GROUPS">
- SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS</a>.
- This counter enables the framework to know how many records have
- been processed successfully, and hence, what record range caused
- a task to crash. On further attempts, this range of records is
- skipped.</p>
- <p>The number of records skipped depends on how frequently the
- processed record counter is incremented by the application.
- It is recommended that this counter be incremented after every
- record is processed. This may not be possible in some applications
- that typically batch their processing. In such cases, the framework
- may skip additional records surrounding the bad record. Users can
- control the number of skipped records through
- <a href="api/org/apache/hadoop/mapred/SkipBadRecords.html#setMapperMaxSkipRecords(org.apache.hadoop.conf.Configuration, long)">
- SkipBadRecords.setMapperMaxSkipRecords(Configuration, long)</a> and
- <a href="api/org/apache/hadoop/mapred/SkipBadRecords.html#setReducerMaxSkipGroups(org.apache.hadoop.conf.Configuration, long)">
- SkipBadRecords.setReducerMaxSkipGroups(Configuration, long)</a>.
The framework tries to narrow the range of skipped records using a
binary search-like approach: the skipped range is divided into two
halves and only one half gets executed. On subsequent
failures, the framework figures out which half contains
bad records. A task is re-executed until the skipped range
shrinks to the acceptable size or all task attempts are exhausted.
- To increase the number of task attempts, use
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setMaxMapAttempts(int)">
- JobConf.setMaxMapAttempts(int)</a> and
- <a href="api/org/apache/hadoop/mapred/JobConf.html#setMaxReduceAttempts(int)">
- JobConf.setMaxReduceAttempts(int)</a>.
- </p>
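<p>A hedged sketch tying these pieces together; the numeric values below
are illustrative, not recommendations:</p>
<pre class="code">
// Sketch: start skipping after 2 failed attempts, and accept losing up
// to 10 records (or 10 key groups on the reduce side) around a bad record.
SkipBadRecords.setAttemptsToStartSkipping(conf, 2);
SkipBadRecords.setMapperMaxSkipRecords(conf, 10);
SkipBadRecords.setReducerMaxSkipGroups(conf, 10);
conf.setMaxMapAttempts(8);  // leave attempts for the binary search

// Inside map(), keep the processed-record counter current so the
// framework can pin down the bad range:
reporter.incrCounter(SkipBadRecords.COUNTER_GROUP,
    SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS, 1);
</pre>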
- <p>Skipped records are written to HDFS in the sequence file
- format, for later analysis. The location can be changed through
- <a href="api/org/apache/hadoop/mapred/SkipBadRecords.html#setSkipOutputPath(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path)">
- SkipBadRecords.setSkipOutputPath(JobConf, Path)</a>.
- </p>
- </div>
-
- <a name="N10F8A"></a><a name="Example%3A+WordCount+v2.0"></a>
- <h2 class="h3">Example: WordCount v2.0</h2>
- <div class="section">
<p>Here is a more complete <span class="codefrag">WordCount</span> which uses many of the
features of the Map/Reduce framework discussed so far.</p>
<p>This example needs HDFS to be up and running, especially for the
<span class="codefrag">DistributedCache</span>-related features. Hence it only works with a
- <a href="quickstart.html#SingleNodeSetup">pseudo-distributed</a> or
- <a href="quickstart.html#Fully-Distributed+Operation">fully-distributed</a>
- Hadoop installation.</p>
- <a name="N10FA4"></a><a name="Source+Code-N10FA4"></a>
- <h3 class="h4">Source Code</h3>
- <table class="ForrestTable" cellspacing="1" cellpadding="4">
-
- <tr>
-
- <th colspan="1" rowspan="1"></th>
- <th colspan="1" rowspan="1">WordCount.java</th>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">1.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">package org.myorg;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">2.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">3.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">import java.io.*;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">4.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">import java.util.*;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">5.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">6.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">import org.apache.hadoop.fs.Path;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">7.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">import org.apache.hadoop.filecache.DistributedCache;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">8.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">import org.apache.hadoop.conf.*;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">9.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">import org.apache.hadoop.io.*;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">10.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">import org.apache.hadoop.mapred.*;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">11.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">import org.apache.hadoop.util.*;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">12.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">13.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">public class WordCount extends Configured implements Tool {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">14.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">15.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- public static class Map extends MapReduceBase
- implements Mapper<LongWritable, Text, Text, IntWritable> {
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">16.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">17.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- static enum Counters { INPUT_WORDS }
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">18.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">19.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- private final static IntWritable one = new IntWritable(1);
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">20.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">private Text word = new Text();</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">21.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">22.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">private boolean caseSensitive = true;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">23.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">private Set<String> patternsToSkip = new HashSet<String>();</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">24.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">25.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">private long numRecords = 0;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">26.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">private String inputFile;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">27.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">28.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">public void configure(JobConf job) {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">29.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- caseSensitive = job.getBoolean("wordcount.case.sensitive", true);
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">30.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">inputFile = job.get("map.input.file");</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">31.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">32.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">if (job.getBoolean("wordcount.skip.patterns", false)) {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">33.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">Path[] patternsFiles = new Path[0];</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">34.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">try {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">35.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- patternsFiles = DistributedCache.getLocalCacheFiles(job);
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">36.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">} catch (IOException ioe) {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">37.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- System.err.println("Caught exception while getting cached files: "
- + StringUtils.stringifyException(ioe));
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">38.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">39.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">for (Path patternsFile : patternsFiles) {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">40.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">parseSkipFile(patternsFile);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">41.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">42.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">43.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">44.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">45.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">private void parseSkipFile(Path patternsFile) {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">46.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">try {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">47.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- BufferedReader fis =
- new BufferedReader(new FileReader(patternsFile.toString()));
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">48.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">String pattern = null;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">49.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">while ((pattern = fis.readLine()) != null) {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">50.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">patternsToSkip.add(pattern);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">51.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">52.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">} catch (IOException ioe) {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">53.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- System.err.println("Caught exception while parsing the cached file '" +
- patternsFile + "' : " +
- StringUtils.stringifyException(ioe));
-
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">54.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">55.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">56.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">57.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- public void map(LongWritable key, Text value,
- OutputCollector<Text, IntWritable> output,
- Reporter reporter) throws IOException {
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">58.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- String line =
- (caseSensitive) ? value.toString() :
- value.toString().toLowerCase();
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">59.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">60.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">for (String pattern : patternsToSkip) {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">61.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">line = line.replaceAll(pattern, "");</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">62.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">63.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">64.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">StringTokenizer tokenizer = new StringTokenizer(line);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">65.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">while (tokenizer.hasMoreTokens()) {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">66.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">word.set(tokenizer.nextToken());</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">67.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">output.collect(word, one);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">68.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">reporter.incrCounter(Counters.INPUT_WORDS, 1);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">69.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">70.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">71.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">if ((++numRecords % 100) == 0) {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">72.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- reporter.setStatus("Finished processing " + numRecords +
- " records " + "from the input file: " +
- inputFile);
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">73.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">74.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">75.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">76.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">77.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- public static class Reduce extends MapReduceBase implements
- Reducer<Text, IntWritable, Text, IntWritable> {
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">78.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- public void reduce(Text key, Iterator<IntWritable> values,
- OutputCollector<Text, IntWritable> output,
- Reporter reporter) throws IOException {
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">79.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">int sum = 0;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">80.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">while (values.hasNext()) {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">81.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">sum += values.next().get();</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">82.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">83.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">output.collect(key, new IntWritable(sum));</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">84.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">85.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">86.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">87.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">public int run(String[] args) throws Exception {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">88.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- JobConf conf = new JobConf(getConf(), WordCount.class);
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">89.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">conf.setJobName("wordcount");</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">90.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">91.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">conf.setOutputKeyClass(Text.class);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">92.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">conf.setOutputValueClass(IntWritable.class);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">93.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">94.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">conf.setMapperClass(Map.class);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">95.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">conf.setCombinerClass(Reduce.class);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">96.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">conf.setReducerClass(Reduce.class);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">97.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">98.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">conf.setInputFormat(TextInputFormat.class);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">99.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">conf.setOutputFormat(TextOutputFormat.class);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">100.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">101.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- List<String> other_args = new ArrayList<String>();
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">102.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">for (int i=0; i < args.length; ++i) {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">103.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">if ("-skip".equals(args[i])) {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">104.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- DistributedCache.addCacheFile(new Path(args[++i]).toUri(), conf);
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">105.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- conf.setBoolean("wordcount.skip.patterns", true);
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">106.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">} else {</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">107.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">other_args.add(args[i]);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">108.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">109.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">110.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">111.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">FileInputFormat.setInputPaths(conf, new Path(other_args.get(0)));</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">112.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">FileOutputFormat.setOutputPath(conf, new Path(other_args.get(1)));</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">113.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">114.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">JobClient.runJob(conf);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">115.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">return 0;</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">116.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">117.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">118.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- public static void main(String[] args) throws Exception {
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">119.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">
- int res = ToolRunner.run(new Configuration(), new WordCount(),
- args);
- </span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">120.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">System.exit(res);</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">121.</td>
- <td colspan="1" rowspan="1">
-
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">122.</td>
- <td colspan="1" rowspan="1">
- <span class="codefrag">}</span>
- </td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">123.</td>
- <td colspan="1" rowspan="1"></td>
-
- </tr>
-
- </table>
- <a name="N11706"></a><a name="Sample+Runs"></a>
- <h3 class="h4">Sample Runs</h3>
- <p>Sample text-files as input:</p>
- <p>
-
- <span class="codefrag">$ bin/hadoop dfs -ls /usr/joe/wordcount/input/</span>
- <br>
-
- <span class="codefrag">/usr/joe/wordcount/input/file01</span>
- <br>
-
- <span class="codefrag">/usr/joe/wordcount/input/file02</span>
- <br>
-
- <br>
-
- <span class="codefrag">$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file01</span>
- <br>
-
- <span class="codefrag">Hello World, Bye World!</span>
- <br>
-
- <br>
-
- <span class="codefrag">$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file02</span>
- <br>
-
- <span class="codefrag">Hello Hadoop, Goodbye to hadoop.</span>
-
- </p>
- <p>Run the application:</p>
- <p>
-
- <span class="codefrag">
- $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount
- /usr/joe/wordcount/input /usr/joe/wordcount/output
- </span>
-
- </p>
- <p>Output:</p>
- <p>
-
- <span class="codefrag">
- $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
- </span>
-
- <br>
-
- <span class="codefrag">Bye 1</span>
- <br>
-
- <span class="codefrag">Goodbye 1</span>
- <br>
-
- <span class="codefrag">Hadoop, 1</span>
- <br>
-
- <span class="codefrag">Hello 2</span>
- <br>
-
- <span class="codefrag">World! 1</span>
- <br>
-
- <span class="codefrag">World, 1</span>
- <br>
-
- <span class="codefrag">hadoop. 1</span>
- <br>
-
- <span class="codefrag">to 1</span>
- <br>
-
- </p>
<p>Notice that the inputs differ from those in the first version we
looked at, and how they affect the outputs.</p>
<p>Now, let's plug in a pattern-file that lists the word-patterns to be
ignored, via the <span class="codefrag">DistributedCache</span>.</p>
- <p>
-
- <span class="codefrag">$ hadoop dfs -cat /user/joe/wordcount/patterns.txt</span>
- <br>
-
- <span class="codefrag">\.</span>
- <br>
-
- <span class="codefrag">\,</span>
- <br>
-
- <span class="codefrag">\!</span>
- <br>
-
- <span class="codefrag">to</span>
- <br>
-
- </p>
- <p>Run it again, this time with more options:</p>
- <p>
-
- <span class="codefrag">
- $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount
- -Dwordcount.case.sensitive=true /usr/joe/wordcount/input
- /usr/joe/wordcount/output -skip /user/joe/wordcount/patterns.txt
- </span>
-
- </p>
- <p>As expected, the output:</p>
- <p>
-
- <span class="codefrag">
- $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
- </span>
-
- <br>
-
- <span class="codefrag">Bye 1</span>
- <br>
-
- <span class="codefrag">Goodbye 1</span>
- <br>
-
- <span class="codefrag">Hadoop 1</span>
- <br>
-
- <span class="codefrag">Hello 2</span>
- <br>
-
- <span class="codefrag">World 2</span>
- <br>
-
- <span class="codefrag">hadoop 1</span>
- <br>
-
- </p>
<p>Run it once more, this time switching off case-sensitivity:</p>
- <p>
-
- <span class="codefrag">
- $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount
- -Dwordcount.case.sensitive=false /usr/joe/wordcount/input
- /usr/joe/wordcount/output -skip /user/joe/wordcount/patterns.txt
- </span>
-
- </p>
- <p>Sure enough, the output:</p>
- <p>
-
- <span class="codefrag">
- $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
- </span>
-
- <br>
-
- <span class="codefrag">bye 1</span>
- <br>
-
- <span class="codefrag">goodbye 1</span>
- <br>
-
- <span class="codefrag">hadoop 2</span>
- <br>
-
- <span class="codefrag">hello 2</span>
- <br>
-
- <span class="codefrag">world 2</span>
- <br>
-
- </p>
- <a name="N117DA"></a><a name="Highlights"></a>
- <h3 class="h4">Highlights</h3>
- <p>The second version of <span class="codefrag">WordCount</span> improves upon the
- previous one by using some features offered by the Map/Reduce framework:
- </p>
- <ul>
-
- <li>
- Demonstrates how applications can access configuration parameters
- in the <span class="codefrag">configure</span> method of the <span class="codefrag">Mapper</span> (and
- <span class="codefrag">Reducer</span>) implementations (lines 28-43).
- </li>
-
- <li>
- Demonstrates how the <span class="codefrag">DistributedCache</span> can be used to
- distribute read-only data needed by the jobs. Here it allows the user
- to specify word-patterns to skip while counting (line 104).
- </li>
-
- <li>
- Demonstrates the utility of the <span class="codefrag">Tool</span> interface and the
- <span class="codefrag">GenericOptionsParser</span> to handle generic Hadoop
- command-line options (lines 87-116, 119).
- </li>
-
- <li>
- Demonstrates how applications can use <span class="codefrag">Counters</span> (line 68)
- and how they can set application-specific status information via
- the <span class="codefrag">Reporter</span> instance passed to the <span class="codefrag">map</span> (and
- <span class="codefrag">reduce</span>) method (line 72).
- </li>
-
- </ul>
- </div>
-
- <p>
-
- <em>Java and JNI are trademarks or registered trademarks of
- Sun Microsystems, Inc. in the United States and other countries.</em>
-
- </p>
-
-
- </div>
- <!--+
- |end content
- +-->
- <div class="clearboth"> </div>
- </div>
- <div id="footer">
- <!--+
- |start bottomstrip
- +-->
- <div class="lastmodified">
- <script type="text/javascript"><!--
- document.write("Last Published: " + document.lastModified);
- // --></script>
- </div>
- <div class="copyright">
- Copyright ©
- 2008 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
- </div>
- <!--+
- |end bottomstrip
- +-->
- </div>
- </body>
- </html>
|