  1. <?xml version="1.0"?>
  2. <!--
  3. Copyright 2002-2004 The Apache Software Foundation
  4. Licensed under the Apache License, Version 2.0 (the "License");
  5. you may not use this file except in compliance with the License.
  6. You may obtain a copy of the License at
  7. http://www.apache.org/licenses/LICENSE-2.0
  8. Unless required by applicable law or agreed to in writing, software
  9. distributed under the License is distributed on an "AS IS" BASIS,
  10. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  11. See the License for the specific language governing permissions and
  12. limitations under the License.
  13. -->
  14. <!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN" "http://forrest.apache.org/dtd/document-v20.dtd">
  15. <document>
  16. <header>
  17. <title>Hadoop Map-Reduce Tutorial</title>
  18. </header>
  19. <body>
  20. <section>
  21. <title>Purpose</title>
  22. <p>This document comprehensively describes all user-facing facets of the
  23. Hadoop Map-Reduce framework and serves as a tutorial.
  24. </p>
  25. </section>
  26. <section>
  27. <title>Pre-requisites</title>
  28. <p>Ensure that Hadoop is installed, configured and running. More
  29. details:</p>
  30. <ul>
  31. <li>
  32. Hadoop <a href="quickstart.html">Quickstart</a> for first-time users.
  33. </li>
  34. <li>
  35. Hadoop <a href="cluster_setup.html">Cluster Setup</a> for large,
  36. distributed clusters.
  37. </li>
  38. </ul>
  39. </section>
  40. <section>
  41. <title>Overview</title>
  42. <p>Hadoop Map-Reduce is a software framework for easily writing
  43. applications which process vast amounts of data (multi-terabyte data-sets)
  44. in-parallel on large clusters (thousands of nodes) of commodity
  45. hardware in a reliable, fault-tolerant manner.</p>
  46. <p>A Map-Reduce <em>job</em> usually splits the input data-set into
  47. independent chunks which are processed by the <em>map tasks</em> in a
  48. completely parallel manner. The framework sorts the outputs of the maps,
  49. which are then input to the <em>reduce tasks</em>. Typically both the
  50. input and the output of the job are stored in a file-system. The framework
  51. takes care of scheduling tasks, monitoring them and re-executes the failed
  52. tasks.</p>
  53. <p>Typically the compute nodes and the storage nodes are the same, that is,
  54. the Map-Reduce framework and the <a href="hdfs_design.html">Distributed
  55. FileSystem</a> are running on the same set of nodes. This configuration
  56. allows the framework to effectively schedule tasks on the nodes where data
  57. is already present, resulting in very high aggregate bandwidth across the
  58. cluster.</p>
  59. <p>The Map-Reduce framework consists of a single master
  60. <code>JobTracker</code> and one slave <code>TaskTracker</code> per
  61. cluster-node. The master is responsible for scheduling the jobs' component
  62. tasks on the slaves, monitoring them and re-executing the failed tasks. The
  63. slaves execute the tasks as directed by the master.</p>
  64. <p>Minimally, applications specify the input/output locations and supply
  65. <em>map</em> and <em>reduce</em> functions via implementations of
  66. appropriate interfaces and/or abstract-classes. These, and other job
  67. parameters, comprise the <em>job configuration</em>. The Hadoop
  68. <em>job client</em> then submits the job (jar/executable etc.) and
  69. configuration to the <code>JobTracker</code> which then assumes the
  70. responsibility of distributing the software/configuration to the slaves,
  71. scheduling tasks and monitoring them, providing status and diagnostic
  72. information to the job-client.</p>
  73. <p>Although the Hadoop framework is implemented in Java<sup>TM</sup>,
  74. Map-Reduce applications need not be written in Java.</p>
  75. <ul>
  76. <li>
  77. <a href="ext:api/org/apache/hadoop/streaming/package-summary">
  78. Hadoop Streaming</a> is a utility which allows users to create and run
  79. jobs with any executables (e.g. shell utilities) as the mapper and/or
  80. the reducer.
  81. </li>
  82. <li>
  83. <a href="ext:api/org/apache/hadoop/mapred/pipes/package-summary">
  84. Hadoop Pipes</a> is a <a href="http://www.swig.org/">SWIG</a>-
  85. compatible <em>C++ API</em> to implement Map-Reduce applications (non
  86. JNI<sup>TM</sup> based).
  87. </li>
  88. </ul>
  89. </section>
  90. <section>
  91. <title>Inputs and Outputs</title>
  92. <p>The Map-Reduce framework operates exclusively on
  93. <code>&lt;key, value&gt;</code> pairs, that is, the framework views the
  94. input to the job as a set of <code>&lt;key, value&gt;</code> pairs and
  95. produces a set of <code>&lt;key, value&gt;</code> pairs as the output of
  96. the job, conceivably of different types.</p>
  97. <p>The <code>key</code> and <code>value</code> classes have to be
  98. serializable by the framework and hence need to implement the
  99. <a href="ext:api/org/apache/hadoop/io/writable">Writable</a>
  100. interface. Additionally, the <code>key</code> classes have to implement the
  101. <a href="ext:api/org/apache/hadoop/io/writablecomparable">
  102. WritableComparable</a> interface to facilitate sorting by the framework.
  103. </p>
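<p>For illustration, here is a minimal sketch of a custom key type. The
class name <code>WordPair</code> and its fields are hypothetical (and the
usual <code>java.io</code> and <code>org.apache.hadoop.io</code> imports are
assumed), but any key class has to implement serialization and comparison
roughly along these lines:</p>
<p>
<code>public class WordPair implements WritableComparable {</code><br/>
&nbsp;&nbsp;<code>private Text first = new Text();</code><br/>
&nbsp;&nbsp;<code>private Text second = new Text();</code><br/>
<br/>
&nbsp;&nbsp;<code>// serialize the fields, in order, for the framework</code><br/>
&nbsp;&nbsp;<code>public void write(DataOutput out) throws IOException {</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;<code>first.write(out); second.write(out);</code><br/>
&nbsp;&nbsp;<code>}</code><br/>
<br/>
&nbsp;&nbsp;<code>// deserialize the fields in the same order</code><br/>
&nbsp;&nbsp;<code>public void readFields(DataInput in) throws IOException {</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;<code>first.readFields(in); second.readFields(in);</code><br/>
&nbsp;&nbsp;<code>}</code><br/>
<br/>
&nbsp;&nbsp;<code>// define the sort order used by the framework</code><br/>
&nbsp;&nbsp;<code>public int compareTo(Object o) {</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;<code>WordPair other = (WordPair) o;</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;<code>int cmp = first.compareTo(other.first);</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;<code>return (cmp != 0) ? cmp : second.compareTo(other.second);</code><br/>
&nbsp;&nbsp;<code>}</code><br/>
<code>}</code>
</p>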
  104. <p>Input and Output types of a Map-Reduce job:</p>
  105. <p>
  106. (input) <code>&lt;k1, v1&gt;</code>
  107. -&gt;
  108. <strong>map</strong>
  109. -&gt;
  110. <code>&lt;k2, v2&gt;</code>
  111. -&gt;
  112. <strong>combine</strong>
  113. -&gt;
  114. <code>&lt;k2, v2&gt;</code>
  115. -&gt;
  116. <strong>reduce</strong>
  117. -&gt;
  118. <code>&lt;k3, v3&gt;</code> (output)
  119. </p>
  120. </section>
  121. <section>
  122. <title>Example: WordCount v1.0</title>
  123. <p>Before we jump into the details, let's walk through an example Map-Reduce
  124. application to get a flavour of how they work.</p>
  125. <p><code>WordCount</code> is a simple application that counts the number of
  126. occurrences of each word in a given input set.</p>
  127. <p>This works with a
  128. <a href="quickstart.html#Standalone+Operation">local-standalone</a>,
  129. <a href="quickstart.html#SingleNodeSetup">pseudo-distributed</a> or
  130. <a href="quickstart.html#Fully-Distributed+Operation">fully-distributed</a>
  131. Hadoop installation.</p>
  132. <section>
  133. <title>Source Code</title>
  134. <table>
  135. <tr>
  136. <th></th>
  137. <th>WordCount.java</th>
  138. </tr>
  139. <tr>
  140. <td>1.</td>
  141. <td>
  142. <code>package org.myorg;</code>
  143. </td>
  144. </tr>
  145. <tr>
  146. <td>2.</td>
  147. <td></td>
  148. </tr>
  149. <tr>
  150. <td>3.</td>
  151. <td>
  152. <code>import java.io.IOException;</code>
  153. </td>
  154. </tr>
  155. <tr>
  156. <td>4.</td>
  157. <td>
  158. <code>import java.util.*;</code>
  159. </td>
  160. </tr>
  161. <tr>
  162. <td>5.</td>
  163. <td></td>
  164. </tr>
  165. <tr>
  166. <td>6.</td>
  167. <td>
  168. <code>import org.apache.hadoop.fs.Path;</code>
  169. </td>
  170. </tr>
  171. <tr>
  172. <td>7.</td>
  173. <td>
  174. <code>import org.apache.hadoop.conf.*;</code>
  175. </td>
  176. </tr>
  177. <tr>
  178. <td>8.</td>
  179. <td>
  180. <code>import org.apache.hadoop.io.*;</code>
  181. </td>
  182. </tr>
  183. <tr>
  184. <td>9.</td>
  185. <td>
  186. <code>import org.apache.hadoop.mapred.*;</code>
  187. </td>
  188. </tr>
  189. <tr>
  190. <td>10.</td>
  191. <td>
  192. <code>import org.apache.hadoop.util.*;</code>
  193. </td>
  194. </tr>
  195. <tr>
  196. <td>11.</td>
  197. <td></td>
  198. </tr>
  199. <tr>
  200. <td>12.</td>
  201. <td>
  202. <code>public class WordCount {</code>
  203. </td>
  204. </tr>
  205. <tr>
  206. <td>13.</td>
  207. <td></td>
  208. </tr>
  209. <tr>
  210. <td>14.</td>
  211. <td>
  212. &nbsp;&nbsp;
  213. <code>
  214. public static class Map extends MapReduceBase
  215. implements Mapper&lt;LongWritable, Text, Text, IntWritable&gt; {
  216. </code>
  217. </td>
  218. </tr>
  219. <tr>
  220. <td>15.</td>
  221. <td>
  222. &nbsp;&nbsp;&nbsp;&nbsp;
  223. <code>
  224. private final static IntWritable one = new IntWritable(1);
  225. </code>
  226. </td>
  227. </tr>
  228. <tr>
  229. <td>16.</td>
  230. <td>
  231. &nbsp;&nbsp;&nbsp;&nbsp;
  232. <code>private Text word = new Text();</code>
  233. </td>
  234. </tr>
  235. <tr>
  236. <td>17.</td>
  237. <td></td>
  238. </tr>
  239. <tr>
  240. <td>18.</td>
  241. <td>
  242. &nbsp;&nbsp;&nbsp;&nbsp;
  243. <code>
  244. public void map(LongWritable key, Text value,
  245. OutputCollector&lt;Text, IntWritable&gt; output,
  246. Reporter reporter) throws IOException {
  247. </code>
  248. </td>
  249. </tr>
  250. <tr>
  251. <td>19.</td>
  252. <td>
  253. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  254. <code>String line = value.toString();</code>
  255. </td>
  256. </tr>
  257. <tr>
  258. <td>20.</td>
  259. <td>
  260. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  261. <code>StringTokenizer tokenizer = new StringTokenizer(line);</code>
  262. </td>
  263. </tr>
  264. <tr>
  265. <td>21.</td>
  266. <td>
  267. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  268. <code>while (tokenizer.hasMoreTokens()) {</code>
  269. </td>
  270. </tr>
  271. <tr>
  272. <td>22.</td>
  273. <td>
  274. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  275. <code>word.set(tokenizer.nextToken());</code>
  276. </td>
  277. </tr>
  278. <tr>
  279. <td>23.</td>
  280. <td>
  281. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  282. <code>output.collect(word, one);</code>
  283. </td>
  284. </tr>
  285. <tr>
  286. <td>24.</td>
  287. <td>
  288. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  289. <code>}</code>
  290. </td>
  291. </tr>
  292. <tr>
  293. <td>25.</td>
  294. <td>
  295. &nbsp;&nbsp;&nbsp;&nbsp;
  296. <code>}</code>
  297. </td>
  298. </tr>
  299. <tr>
  300. <td>26.</td>
  301. <td>
  302. &nbsp;&nbsp;
  303. <code>}</code>
  304. </td>
  305. </tr>
  306. <tr>
  307. <td>27.</td>
  308. <td></td>
  309. </tr>
  310. <tr>
  311. <td>28.</td>
  312. <td>
  313. &nbsp;&nbsp;
  314. <code>
  315. public static class Reduce extends MapReduceBase implements
  316. Reducer&lt;Text, IntWritable, Text, IntWritable&gt; {
  317. </code>
  318. </td>
  319. </tr>
  320. <tr>
  321. <td>29.</td>
  322. <td>
  323. &nbsp;&nbsp;&nbsp;&nbsp;
  324. <code>
  325. public void reduce(Text key, Iterator&lt;IntWritable&gt; values,
  326. OutputCollector&lt;Text, IntWritable&gt; output,
  327. Reporter reporter) throws IOException {
  328. </code>
  329. </td>
  330. </tr>
  331. <tr>
  332. <td>30.</td>
  333. <td>
  334. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  335. <code>int sum = 0;</code>
  336. </td>
  337. </tr>
  338. <tr>
  339. <td>31.</td>
  340. <td>
  341. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  342. <code>while (values.hasNext()) {</code>
  343. </td>
  344. </tr>
  345. <tr>
  346. <td>32.</td>
  347. <td>
  348. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  349. <code>sum += values.next().get();</code>
  350. </td>
  351. </tr>
  352. <tr>
  353. <td>33.</td>
  354. <td>
  355. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  356. <code>}</code>
  357. </td>
  358. </tr>
  359. <tr>
  360. <td>34.</td>
  361. <td>
  362. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  363. <code>output.collect(key, new IntWritable(sum));</code>
  364. </td>
  365. </tr>
  366. <tr>
  367. <td>35.</td>
  368. <td>
  369. &nbsp;&nbsp;&nbsp;&nbsp;
  370. <code>}</code>
  371. </td>
  372. </tr>
  373. <tr>
  374. <td>36.</td>
  375. <td>
  376. &nbsp;&nbsp;
  377. <code>}</code>
  378. </td>
  379. </tr>
  380. <tr>
  381. <td>37.</td>
  382. <td></td>
  383. </tr>
  384. <tr>
  385. <td>38.</td>
  386. <td>
  387. &nbsp;&nbsp;
  388. <code>
  389. public static void main(String[] args) throws Exception {
  390. </code>
  391. </td>
  392. </tr>
  393. <tr>
  394. <td>39.</td>
  395. <td>
  396. &nbsp;&nbsp;&nbsp;&nbsp;
  397. <code>
  398. JobConf conf = new JobConf(WordCount.class);
  399. </code>
  400. </td>
  401. </tr>
  402. <tr>
  403. <td>40.</td>
  404. <td>
  405. &nbsp;&nbsp;&nbsp;&nbsp;
  406. <code>conf.setJobName("wordcount");</code>
  407. </td>
  408. </tr>
  409. <tr>
  410. <td>41.</td>
  411. <td></td>
  412. </tr>
  413. <tr>
  414. <td>42.</td>
  415. <td>
  416. &nbsp;&nbsp;&nbsp;&nbsp;
  417. <code>conf.setOutputKeyClass(Text.class);</code>
  418. </td>
  419. </tr>
  420. <tr>
  421. <td>43.</td>
  422. <td>
  423. &nbsp;&nbsp;&nbsp;&nbsp;
  424. <code>conf.setOutputValueClass(IntWritable.class);</code>
  425. </td>
  426. </tr>
  427. <tr>
  428. <td>44.</td>
  429. <td></td>
  430. </tr>
  431. <tr>
  432. <td>45.</td>
  433. <td>
  434. &nbsp;&nbsp;&nbsp;&nbsp;
  435. <code>conf.setMapperClass(Map.class);</code>
  436. </td>
  437. </tr>
  438. <tr>
  439. <td>46.</td>
  440. <td>
  441. &nbsp;&nbsp;&nbsp;&nbsp;
  442. <code>conf.setCombinerClass(Reduce.class);</code>
  443. </td>
  444. </tr>
  445. <tr>
  446. <td>47.</td>
  447. <td>
  448. &nbsp;&nbsp;&nbsp;&nbsp;
  449. <code>conf.setReducerClass(Reduce.class);</code>
  450. </td>
  451. </tr>
  452. <tr>
  453. <td>48.</td>
  454. <td></td>
  455. </tr>
  456. <tr>
  457. <td>49.</td>
  458. <td>
  459. &nbsp;&nbsp;&nbsp;&nbsp;
  460. <code>conf.setInputFormat(TextInputFormat.class);</code>
  461. </td>
  462. </tr>
  463. <tr>
  464. <td>50.</td>
  465. <td>
  466. &nbsp;&nbsp;&nbsp;&nbsp;
  467. <code>conf.setOutputFormat(TextOutputFormat.class);</code>
  468. </td>
  469. </tr>
  470. <tr>
  471. <td>51.</td>
  472. <td></td>
  473. </tr>
  474. <tr>
  475. <td>52.</td>
  476. <td>
  477. &nbsp;&nbsp;&nbsp;&nbsp;
  478. <code>FileInputFormat.setInputPaths(conf, new Path(args[0]));</code>
  479. </td>
  480. </tr>
  481. <tr>
  482. <td>53.</td>
  483. <td>
  484. &nbsp;&nbsp;&nbsp;&nbsp;
  485. <code>FileOutputFormat.setOutputPath(conf, new Path(args[1]));</code>
  486. </td>
  487. </tr>
  488. <tr>
  489. <td>54.</td>
  490. <td></td>
  491. </tr>
  492. <tr>
  493. <td>55.</td>
  494. <td>
  495. &nbsp;&nbsp;&nbsp;&nbsp;
  496. <code>JobClient.runJob(conf);</code>
  497. </td>
  498. </tr>
  499. <tr>
  500. <td>56.</td>
  501. <td>
  502. &nbsp;&nbsp;
  503. <code>}</code>
  504. </td>
  505. </tr>
  506. <tr>
  507. <td>57.</td>
  508. <td>
  509. <code>}</code>
  510. </td>
  511. </tr>
  512. <tr>
  513. <td>58.</td>
  514. <td></td>
  515. </tr>
  516. </table>
  517. </section>
  518. <section>
  519. <title>Usage</title>
  520. <p>Assuming <code>HADOOP_HOME</code> is the root of the installation and
  521. <code>HADOOP_VERSION</code> is the Hadoop version installed, compile
  522. <code>WordCount.java</code> and create a jar:</p>
  523. <p>
  524. <code>$ mkdir wordcount_classes</code><br/>
  525. <code>
  526. $ javac -classpath ${HADOOP_HOME}/hadoop-${HADOOP_VERSION}-core.jar
  527. -d wordcount_classes WordCount.java
  528. </code><br/>
  529. <code>$ jar -cvf /usr/joe/wordcount.jar -C wordcount_classes/ .</code>
  530. </p>
  531. <p>Assuming that:</p>
  532. <ul>
  533. <li>
  534. <code>/usr/joe/wordcount/input</code> - input directory in HDFS
  535. </li>
  536. <li>
  537. <code>/usr/joe/wordcount/output</code> - output directory in HDFS
  538. </li>
  539. </ul>
  540. <p>Sample text-files as input:</p>
  541. <p>
  542. <code>$ bin/hadoop dfs -ls /usr/joe/wordcount/input/</code><br/>
  543. <code>/usr/joe/wordcount/input/file01</code><br/>
  544. <code>/usr/joe/wordcount/input/file02</code><br/>
  545. <br/>
  546. <code>$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file01</code><br/>
  547. <code>Hello World Bye World</code><br/>
  548. <br/>
  549. <code>$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file02</code><br/>
  550. <code>Hello Hadoop Goodbye Hadoop</code>
  551. </p>
  552. <p>Run the application:</p>
  553. <p>
  554. <code>
  555. $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount
  556. /usr/joe/wordcount/input /usr/joe/wordcount/output
  557. </code>
  558. </p>
  559. <p>Output:</p>
  560. <p>
  561. <code>
  562. $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
  563. </code>
  564. <br/>
  565. <code>Bye 1</code><br/>
  566. <code>Goodbye 1</code><br/>
  567. <code>Hadoop 2</code><br/>
  568. <code>Hello 2</code><br/>
  569. <code>World 2</code><br/>
  570. </p>
  571. </section>
  572. <section>
  573. <title>Walk-through</title>
  574. <p>The <code>WordCount</code> application is quite straight-forward.</p>
  575. <p>The <code>Mapper</code> implementation (lines 14-26), via the
  576. <code>map</code> method (lines 18-25), processes one line at a time,
  577. as provided by the specified <code>TextInputFormat</code> (line 49).
  578. It then splits the line into tokens separated by whitespace, via the
  579. <code>StringTokenizer</code>, and emits a key-value pair of
  580. <code>&lt; &lt;word&gt;, 1&gt;</code>.</p>
  581. <p>
  582. For the given sample input the first map emits:<br/>
  583. <code>&lt; Hello, 1&gt;</code><br/>
  584. <code>&lt; World, 1&gt;</code><br/>
  585. <code>&lt; Bye, 1&gt;</code><br/>
  586. <code>&lt; World, 1&gt;</code><br/>
  587. </p>
  588. <p>
  589. The second map emits:<br/>
  590. <code>&lt; Hello, 1&gt;</code><br/>
  591. <code>&lt; Hadoop, 1&gt;</code><br/>
  592. <code>&lt; Goodbye, 1&gt;</code><br/>
  593. <code>&lt; Hadoop, 1&gt;</code><br/>
  594. </p>
  595. <p>We'll learn more about the number of maps spawned for a given job, and
  596. how to control them in a fine-grained manner, a bit later in the
  597. tutorial.</p>
  598. <p><code>WordCount</code> also specifies a <code>combiner</code> (line
  599. 46). Hence, the output of each map is passed through the local combiner
  600. (which is the same as the <code>Reducer</code>, as per the job
  601. configuration) for local aggregation, after being sorted on the
  602. <em>key</em>s.</p>
  603. <p>
  604. The output of the first map:<br/>
  605. <code>&lt; Bye, 1&gt;</code><br/>
  606. <code>&lt; Hello, 1&gt;</code><br/>
  607. <code>&lt; World, 2&gt;</code><br/>
  608. </p>
  609. <p>
  610. The output of the second map:<br/>
  611. <code>&lt; Goodbye, 1&gt;</code><br/>
  612. <code>&lt; Hadoop, 2&gt;</code><br/>
  613. <code>&lt; Hello, 1&gt;</code><br/>
  614. </p>
  615. <p>The <code>Reducer</code> implementation (lines 28-36), via the
  616. <code>reduce</code> method (lines 29-35) just sums up the values,
  617. which are the occurrence counts for each key (i.e. the words in this example).
  618. </p>
  619. <p>
  620. Thus the output of the job is:<br/>
  621. <code>&lt; Bye, 1&gt;</code><br/>
  622. <code>&lt; Goodbye, 1&gt;</code><br/>
  623. <code>&lt; Hadoop, 2&gt;</code><br/>
  624. <code>&lt; Hello, 2&gt;</code><br/>
  625. <code>&lt; World, 2&gt;</code><br/>
  626. </p>
  627. <p>The <code>main</code> method specifies various facets of the job, such
  628. as the input/output paths (passed via the command line), key/value
  629. types, input/output formats etc., in the <code>JobConf</code>.
  630. It then calls <code>JobClient.runJob</code> (line 55) to submit the job
  631. and monitor its progress.</p>
  632. <p>We'll learn more about <code>JobConf</code>, <code>JobClient</code>,
  633. <code>Tool</code> and other interfaces and classes a bit later in the
  634. tutorial.</p>
  635. </section>
  636. </section>
  637. <section>
  638. <title>Map-Reduce - User Interfaces</title>
  639. <p>This section provides a reasonable amount of detail on every user-facing
  640. aspect of the Map-Reduce framework. This should help users implement,
  641. configure and tune their jobs in a fine-grained manner. However, please
  642. note that the javadoc for each class/interface remains the most
  643. comprehensive documentation available; this is only meant to be a tutorial.
  644. </p>
  645. <p>Let us first take the <code>Mapper</code> and <code>Reducer</code>
  646. interfaces. Applications typically implement them to provide the
  647. <code>map</code> and <code>reduce</code> methods.</p>
  648. <p>We will then discuss other core interfaces including
  649. <code>JobConf</code>, <code>JobClient</code>, <code>Partitioner</code>,
  650. <code>OutputCollector</code>, <code>Reporter</code>,
  651. <code>InputFormat</code>, <code>OutputFormat</code> and others.</p>
  652. <p>Finally, we will wrap up by discussing some useful features of the
  653. framework such as the <code>DistributedCache</code>,
  654. <code>IsolationRunner</code> etc.</p>
  655. <section>
  656. <title>Payload</title>
  657. <p>Applications typically implement the <code>Mapper</code> and
  658. <code>Reducer</code> interfaces to provide the <code>map</code> and
  659. <code>reduce</code> methods. These form the core of the job.</p>
  660. <section>
  661. <title>Mapper</title>
  662. <p><a href="ext:api/org/apache/hadoop/mapred/mapper">
  663. Mapper</a> maps input key/value pairs to a set of intermediate
  664. key/value pairs.</p>
  665. <p>Maps are the individual tasks that transform input records into
  666. intermediate records. The transformed intermediate records do not need
  667. to be of the same type as the input records. A given input pair may
  668. map to zero or many output pairs.</p>
  669. <p>The Hadoop Map-Reduce framework spawns one map task for each
  670. <code>InputSplit</code> generated by the <code>InputFormat</code> for
  671. the job.</p>
  672. <p>Overall, <code>Mapper</code> implementations are passed the
  673. <code>JobConf</code> for the job via the
  674. <a href="ext:api/org/apache/hadoop/mapred/jobconfigurable/configure">
  675. JobConfigurable.configure(JobConf)</a> method and override it to
  676. initialize themselves. The framework then calls
  677. <a href="ext:api/org/apache/hadoop/mapred/mapper/map">
  678. map(WritableComparable, Writable, OutputCollector, Reporter)</a> for
  679. each key/value pair in the <code>InputSplit</code> for that task.
  680. Applications can then override the
  681. <a href="ext:api/org/apache/hadoop/io/closeable/close">
  682. Closeable.close()</a> method to perform any required cleanup.</p>
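<p>Putting the lifecycle together, a <code>Mapper</code> might read a
parameter in <code>configure</code>, emit records in <code>map</code> and
release resources in <code>close</code>. The sketch below is illustrative:
the class name and the <code>my.pattern</code> parameter are hypothetical.</p>
<p>
<code>public static class GrepMapper extends MapReduceBase</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;<code>implements Mapper&lt;LongWritable, Text, Text, IntWritable&gt; {</code><br/>
&nbsp;&nbsp;<code>private String pattern;</code><br/>
<br/>
&nbsp;&nbsp;<code>// per-task initialization, driven by the JobConf</code><br/>
&nbsp;&nbsp;<code>public void configure(JobConf job) {</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;<code>pattern = job.get("my.pattern", "");</code><br/>
&nbsp;&nbsp;<code>}</code><br/>
<br/>
&nbsp;&nbsp;<code>// called once per input record of the task's InputSplit</code><br/>
&nbsp;&nbsp;<code>public void map(LongWritable key, Text value,</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<code>OutputCollector&lt;Text, IntWritable&gt; output,</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<code>Reporter reporter) throws IOException {</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;<code>if (value.toString().contains(pattern)) {</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<code>output.collect(new Text(pattern), new IntWritable(1));</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;<code>}</code><br/>
&nbsp;&nbsp;<code>}</code><br/>
<br/>
&nbsp;&nbsp;<code>// per-task cleanup, e.g. closing files opened in configure()</code><br/>
&nbsp;&nbsp;<code>public void close() throws IOException { }</code><br/>
<code>}</code>
</p>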
  683. <p>Output pairs do not need to be of the same types as input pairs. A
  684. given input pair may map to zero or many output pairs. Output pairs
  685. are collected with calls to
  686. <a href="ext:api/org/apache/hadoop/mapred/outputcollector/collect">
  687. OutputCollector.collect(WritableComparable,Writable)</a>.</p>
  688. <p>Applications can use the <code>Reporter</code> to report
  689. progress, set application-level status messages and update
  690. <code>Counters</code>, or just indicate that they are alive.</p>
  691. <p>All intermediate values associated with a given output key are
  692. subsequently grouped by the framework, and passed to the
  693. <code>Reducer</code>(s) to determine the final output. Users can
  694. control the grouping by specifying a <code>Comparator</code> via
  695. <a href="ext:api/org/apache/hadoop/mapred/jobconf/setoutputkeycomparatorclass">
  696. JobConf.setOutputKeyComparatorClass(Class)</a>.</p>
  697. <p>The <code>Mapper</code> outputs are sorted and then
  698. partitioned per <code>Reducer</code>. The total number of partitions is
  699. the same as the number of reduce tasks for the job. Users can control
  700. which keys (and hence records) go to which <code>Reducer</code> by
  701. implementing a custom <code>Partitioner</code>.</p>
  702. <p>Users can optionally specify a <code>combiner</code>, via
  703. <a href="ext:api/org/apache/hadoop/mapred/jobconf/setcombinerclass">
  704. JobConf.setCombinerClass(Class)</a>, to perform local aggregation of
  705. the intermediate outputs, which helps to cut down the amount of data
  706. transferred from the <code>Mapper</code> to the <code>Reducer</code>.
  707. </p>
  708. <p>The intermediate, sorted outputs are always stored in files of
  709. <a href="ext:api/org/apache/hadoop/io/sequencefile">
  710. SequenceFile</a> format. Applications can control if, and how, the
  711. intermediate outputs are to be compressed and the
  712. <a href="ext:api/org/apache/hadoop/io/compress/compressioncodec">
  713. CompressionCodec</a> to be used via the <code>JobConf</code>.
  714. </p>
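<p>For example, a job could ask for gzip-compressed map outputs with a
couple of lines in the driver. This is only a sketch; <code>MyJob</code> is
a hypothetical driver class:</p>
<p>
<code>JobConf conf = new JobConf(MyJob.class);</code><br/>
<code>// compress the intermediate map outputs ...</code><br/>
<code>conf.setCompressMapOutput(true);</code><br/>
<code>// ... using the gzip codec</code><br/>
<code>conf.setMapOutputCompressorClass(GzipCodec.class);</code>
</p>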
  715. <section>
  716. <title>How Many Maps?</title>
  717. <p>The number of maps is usually driven by the total size of the
  718. inputs, that is, the total number of blocks of the input files.</p>
  719. <p>The right level of parallelism for maps seems to be around 10-100
  720. maps per-node, although it has been set up to 300 maps for very
  721. CPU-light map tasks. Task setup takes a while, so it is best if the
  722. maps take at least a minute to execute.</p>
  723. <p>Thus, if you expect 10TB of input data and have a blocksize of
  724. <code>128MB</code>, you'll end up with 82,000 maps, unless
  725. <a href="ext:api/org/apache/hadoop/mapred/jobconf/setnummaptasks">
  726. setNumMapTasks(int)</a> (which only provides a hint to the framework)
  727. is used to set it even higher.</p>
  728. </section>
  729. </section>
  730. <section>
  731. <title>Reducer</title>
  732. <p><a href="ext:api/org/apache/hadoop/mapred/reducer">
  733. Reducer</a> reduces a set of intermediate values which share a key to
  734. a smaller set of values.</p>
  735. <p>The number of reduces for the job is set by the user
  736. via <a href="ext:api/org/apache/hadoop/mapred/jobconf/setnumreducetasks">
  737. JobConf.setNumReduceTasks(int)</a>.</p>
  738. <p>Overall, <code>Reducer</code> implementations are passed the
  739. <code>JobConf</code> for the job via the
  740. <a href="ext:api/org/apache/hadoop/mapred/jobconfigurable/configure">
  741. JobConfigurable.configure(JobConf)</a> method and can override it to
  742. initialize themselves. The framework then calls the
  743. <a href="ext:api/org/apache/hadoop/mapred/reducer/reduce">
  744. reduce(WritableComparable, Iterator, OutputCollector, Reporter)</a>
  745. method for each <code>&lt;key, (list of values)&gt;</code>
  746. pair in the grouped inputs. Applications can then override the
  747. <a href="ext:api/org/apache/hadoop/io/closeable/close">
  748. Closeable.close()</a> method to perform any required cleanup.</p>
  749. <p><code>Reducer</code> has 3 primary phases: shuffle, sort and reduce.
  750. </p>
  751. <section>
  752. <title>Shuffle</title>
  753. <p>Input to the <code>Reducer</code> is the sorted output of the
  754. mappers. In this phase the framework fetches the relevant partition
  755. of the output of all the mappers, via HTTP.</p>
  756. </section>
  757. <section>
  758. <title>Sort</title>
  759. <p>The framework groups <code>Reducer</code> inputs by keys (since
  760. different mappers may have output the same key) in this stage.</p>
  761. <p>The shuffle and sort phases occur simultaneously; while
  762. map-outputs are being fetched they are merged.</p>
  763. <section>
  764. <title>Secondary Sort</title>
  765. <p>If equivalence rules for grouping the intermediate keys are
  766. required to be different from those for grouping keys before
  767. reduction, then one may specify a <code>Comparator</code> via
  768. <a href="ext:api/org/apache/hadoop/mapred/jobconf/setoutputvaluegroupingcomparator">
  769. JobConf.setOutputValueGroupingComparator(Class)</a>. Since
  770. <a href="ext:api/org/apache/hadoop/mapred/jobconf/setoutputkeycomparatorclass">
  771. JobConf.setOutputKeyComparatorClass(Class)</a> can be used to
  772. control how intermediate keys are sorted, these can be used in
  773. conjunction to simulate <em>secondary sort on values</em>.</p>
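<p>A typical configuration sketch (both comparator classes are assumed to
be supplied by the application): sort on the full composite key, but group
the values presented to a single <code>reduce</code> call by the primary
part of the key only.</p>
<p>
<code>// order the intermediate keys by the full composite key</code><br/>
<code>conf.setOutputKeyComparatorClass(FullKeyComparator.class);</code><br/>
<code>// group values for a reduce call by the primary part of the key only</code><br/>
<code>conf.setOutputValueGroupingComparator(PrimaryKeyComparator.class);</code>
</p>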
  774. </section>
  775. </section>
  776. <section>
  777. <title>Reduce</title>
  778. <p>In this phase the
  779. <a href="ext:api/org/apache/hadoop/mapred/reducer/reduce">
  780. reduce(WritableComparable, Iterator, OutputCollector, Reporter)</a>
  781. method is called for each <code>&lt;key, (list of values)&gt;</code>
  782. pair in the grouped inputs.</p>
  783. <p>The output of the reduce task is typically written to the
  784. <a href="ext:api/org/apache/hadoop/fs/filesystem">
  785. FileSystem</a> via
  786. <a href="ext:api/org/apache/hadoop/mapred/outputcollector/collect">
  787. OutputCollector.collect(WritableComparable, Writable)</a>.</p>
  788. <p>Applications can use the <code>Reporter</code> to report
  789. progress, set application-level status messages and update
  790. <code>Counters</code>, or just indicate that they are alive.</p>
  791. <p>The output of the <code>Reducer</code> is <em>not sorted</em>.</p>
  792. </section>
  793. <section>
  794. <title>How Many Reduces?</title>
  795. <p>The right number of reduces seems to be <code>0.95</code> or
  796. <code>1.75</code> multiplied by (&lt;<em>no. of nodes</em>&gt; *
  797. <code>mapred.tasktracker.reduce.tasks.maximum</code>).</p>
  798. <p>With <code>0.95</code> all of the reduces can launch immediately
  799. and start transferring map outputs as the maps finish. With
  800. <code>1.75</code> the faster nodes will finish their first round of
  801. reduces and launch a second wave of reduces doing a much better job
  802. of load balancing.</p>
  803. <p>Increasing the number of reduces increases the framework overhead,
  804. but improves load balancing and lowers the cost of failures.</p>
  805. <p>The scaling factors above are slightly less than whole numbers to
  806. reserve a few reduce slots in the framework for speculative-tasks and
  807. failed tasks.</p>
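<p>For instance, on a hypothetical 10-node cluster with
<code>mapred.tasktracker.reduce.tasks.maximum</code> set to 2, the lower
factor works out as follows:</p>
<p>
<code>int nodes = 10;              // hypothetical cluster size</code><br/>
<code>int reduceSlotsPerNode = 2;  // mapred.tasktracker.reduce.tasks.maximum</code><br/>
<code>// 0.95 * 10 * 2 = 19 reduces, leaving a slot free for speculative/failed tasks</code><br/>
<code>conf.setNumReduceTasks((int) (0.95 * nodes * reduceSlotsPerNode));</code>
</p>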
  808. </section>
  809. <section>
  810. <title>Reducer NONE</title>
  811. <p>It is legal to set the number of reduce-tasks to <em>zero</em> if
  812. no reduction is desired.</p>
  813. <p>In this case the outputs of the map-tasks go directly to the
  814. <code>FileSystem</code>, into the output path set by
  815. <a href="ext:api/org/apache/hadoop/mapred/fileoutputformat/setoutputpath">
  816. setOutputPath(Path)</a>. The framework does not sort the
  817. map-outputs before writing them out to the <code>FileSystem</code>.
  818. </p>
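<p>A minimal sketch of such a map-only job configuration (the output path
is illustrative):</p>
<p>
<code>conf.setNumReduceTasks(0);  // no reduce phase; map outputs are the job output</code><br/>
<code>// the unsorted map outputs are written straight to this path</code><br/>
<code>FileOutputFormat.setOutputPath(conf, new Path("/usr/joe/maponly/output"));</code>
</p>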
  819. </section>
  820. </section>
  821. <section>
  822. <title>Partitioner</title>
  823. <p><a href="ext:api/org/apache/hadoop/mapred/partitioner">
  824. Partitioner</a> partitions the key space.</p>
  825. <p>Partitioner controls the partitioning of the keys of the
  826. intermediate map-outputs. The key (or a subset of the key) is used to
  827. derive the partition, typically by a <em>hash function</em>. The total
  828. number of partitions is the same as the number of reduce tasks for the
  829. job. Hence this controls which of the <code>m</code> reduce tasks the
  830. intermediate key (and hence the record) is sent to for reduction.</p>
  831. <p><a href="ext:api/org/apache/hadoop/mapred/lib/hashpartitioner">
  832. HashPartitioner</a> is the default <code>Partitioner</code>.</p>
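<p>As an illustration, here is a sketch of a custom <code>Partitioner</code>
(the class name and the partitioning rule are hypothetical) that routes all
keys sharing the same first token to the same reduce, registered via
<code>JobConf.setPartitionerClass(Class)</code>:</p>
<p>
<code>public class FirstTokenPartitioner implements Partitioner&lt;Text, IntWritable&gt; {</code><br/>
&nbsp;&nbsp;<code>public void configure(JobConf job) { }  // no per-job setup needed</code><br/>
<br/>
&nbsp;&nbsp;<code>public int getPartition(Text key, IntWritable value, int numReduceTasks) {</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;<code>// derive the partition from the first token of the key</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;<code>String first = key.toString().split("\\s+")[0];</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;<code>return (first.hashCode() &amp; Integer.MAX_VALUE) % numReduceTasks;</code><br/>
&nbsp;&nbsp;<code>}</code><br/>
<code>}</code><br/>
<br/>
<code>conf.setPartitionerClass(FirstTokenPartitioner.class);</code>
</p>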
  833. </section>
  834. <section>
  835. <title>Reporter</title>
  836. <p><a href="ext:api/org/apache/hadoop/mapred/reporter">
  837. Reporter</a> is a facility for Map-Reduce applications to report
  838. progress, set application-level status messages and update
  839. <code>Counters</code>.</p>
  840. <p><code>Mapper</code> and <code>Reducer</code> implementations can use
  841. the <code>Reporter</code> to report progress or just indicate
  842. that they are alive. In scenarios where the application takes a
  843. significant amount of time to process individual key/value pairs,
  844. this is crucial since the framework might assume that the task has
  845. timed-out and kill that task. Another way to avoid this is to
  846. set the configuration parameter <code>mapred.task.timeout</code> to a
  847. high-enough value (or even set it to <em>zero</em> for no time-outs).
  848. </p>
  849. <p>Applications can also update <code>Counters</code> using the
  850. <code>Reporter</code>.</p>
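<p>For example (a sketch; the counter enum is application-defined and the
expensive per-record work is elided), a <code>map</code> implementation
might do:</p>
<p>
<code>enum MyCounters { RECORDS }  // an application-defined counter</code><br/>
<br/>
<code>public void map(LongWritable key, Text value,</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;<code>OutputCollector&lt;Text, IntWritable&gt; output,</code><br/>
&nbsp;&nbsp;&nbsp;&nbsp;<code>Reporter reporter) throws IOException {</code><br/>
&nbsp;&nbsp;<code>reporter.setStatus("processing record at offset " + key);</code><br/>
&nbsp;&nbsp;<code>// ... potentially long-running work on this record ...</code><br/>
&nbsp;&nbsp;<code>reporter.progress();                          // tell the framework the task is alive</code><br/>
&nbsp;&nbsp;<code>reporter.incrCounter(MyCounters.RECORDS, 1);  // update an application Counter</code><br/>
<code>}</code>
</p>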
  851. </section>
  852. <section>
  853. <title>OutputCollector</title>
  854. <p><a href="ext:api/org/apache/hadoop/mapred/outputcollector">
  855. OutputCollector</a> is a generalization of the facility provided by
  856. the Map-Reduce framework to collect data output by the
  857. <code>Mapper</code> or the <code>Reducer</code> (either the
  858. intermediate outputs or the output of the job).</p>
  859. </section>
  860. <p>Hadoop Map-Reduce comes bundled with a
  861. <a href="ext:api/org/apache/hadoop/mapred/lib/package-summary">
  862. library</a> of generally useful mappers, reducers, and partitioners.</p>
  863. </section>
  864. <section>
  865. <title>Job Configuration</title>
  866. <p><a href="ext:api/org/apache/hadoop/mapred/jobconf">
  867. JobConf</a> represents a Map-Reduce job configuration.</p>
  868. <p><code>JobConf</code> is the primary interface for a user to describe
  869. a map-reduce job to the Hadoop framework for execution. The framework
  870. tries to faithfully execute the job as described by <code>JobConf</code>,
  871. however:</p>
  872. <ul>
  873. <li>
  874. Some configuration parameters may have been marked as
  875. <a href="ext:api/org/apache/hadoop/conf/configuration/final_parameters">
  876. final</a> by administrators and hence cannot be altered.
  877. </li>
  878. <li>
  879. While some job parameters are straight-forward to set (e.g.
  880. <a href="ext:api/org/apache/hadoop/mapred/jobconf/setnumreducetasks">
  881. setNumReduceTasks(int)</a>), other parameters interact subtly with
  882. the rest of the framework and/or job configuration and are
  883. more complex to set (e.g.
  884. <a href="ext:api/org/apache/hadoop/mapred/jobconf/setnummaptasks">
  885. setNumMapTasks(int)</a>).
  886. </li>
  887. </ul>
  888. <p><code>JobConf</code> is typically used to specify the
  889. <code>Mapper</code>, combiner (if any), <code>Partitioner</code>,
  890. <code>Reducer</code>, <code>InputFormat</code> and
  891. <code>OutputFormat</code> implementations. <code>JobConf</code> also
  892. indicates the set of input files
  893. (<a href="ext:api/org/apache/hadoop/mapred/fileinputformat/setinputpaths">setInputPaths(JobConf, Path...)</a>
  894. /<a href="ext:api/org/apache/hadoop/mapred/fileinputformat/addinputpath">addInputPath(JobConf, Path)</a>)
  895. and (<a href="ext:api/org/apache/hadoop/mapred/fileinputformat/setinputpathstring">setInputPaths(JobConf, String)</a>
  896. /<a href="ext:api/org/apache/hadoop/mapred/fileinputformat/addinputpathstring">addInputPaths(JobConf, String)</a>)
  897. and where the output files should be written
  898. (<a href="ext:api/org/apache/hadoop/mapred/fileoutputformat/setoutputpath">setOutputPath(Path)</a>).</p>
  899. <p>Optionally, <code>JobConf</code> is used to specify other advanced
  900. facets of the job such as the <code>Comparator</code> to be used, files
  901. to be put in the <code>DistributedCache</code>, whether intermediate
  902. and/or job outputs are to be compressed (and how), debugging via
  903. user-provided scripts
  904. (<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmapdebugscript">setMapDebugScript(String)</a>/<a href="ext:api/org/apache/hadoop/mapred/jobconf/setreducedebugscript">setReduceDebugScript(String)</a>)
  905. , whether job tasks can be executed in a <em>speculative</em> manner
  906. (<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmapspeculativeexecution">setMapSpeculativeExecution(boolean)</a>)/(<a href="ext:api/org/apache/hadoop/mapred/jobconf/setreducespeculativeexecution">setReduceSpeculativeExecution(boolean)</a>)
  907. , maximum number of attempts per task
  908. (<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmaxmapattempts">setMaxMapAttempts(int)</a>/<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmaxreduceattempts">setMaxReduceAttempts(int)</a>)
  909. , percentage of tasks failure which can be tolerated by the job
  910. (<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmaxmaptaskfailurespercent">setMaxMapTaskFailuresPercent(int)</a>/<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmaxreducetaskfailurespercent">setMaxReduceTaskFailuresPercent(int)</a>)
  911. etc.</p>
  912. <p>Of course, users can use
  913. <a href="ext:api/org/apache/hadoop/conf/configuration/set">set(String, String)</a>/<a href="ext:api/org/apache/hadoop/conf/configuration/get">get(String, String)</a>
  914. to set/get arbitrary parameters needed by applications. However, use the
  915. <code>DistributedCache</code> for large amounts of (read-only) data.</p>
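<p>Putting a few of these together, a driver might configure a job roughly
as follows. This is a sketch; the class name, the values and the
<code>my.app.param</code> parameter are illustrative:</p>
<p>
<code>JobConf conf = new JobConf(MyJob.class);</code><br/>
<code>conf.setJobName("myjob");</code><br/>
<br/>
<code>// speculative execution and retry policy</code><br/>
<code>conf.setMapSpeculativeExecution(true);</code><br/>
<code>conf.setReduceSpeculativeExecution(false);</code><br/>
<code>conf.setMaxMapAttempts(8);</code><br/>
<code>conf.setMaxReduceAttempts(8);</code><br/>
<br/>
<code>// tolerate a small percentage of failed tasks</code><br/>
<code>conf.setMaxMapTaskFailuresPercent(5);</code><br/>
<code>conf.setMaxReduceTaskFailuresPercent(5);</code><br/>
<br/>
<code>// an arbitrary application parameter, read back in the tasks via get()</code><br/>
<code>conf.set("my.app.param", "some-value");</code>
</p>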
  916. </section>
  917. <section>
  918. <title>Task Execution &amp; Environment</title>
  919. <p>The <code>TaskTracker</code> executes the <code>Mapper</code>/
  920. <code>Reducer</code> <em>task</em> as a child process in a separate jvm.
  921. </p>
  922. <p>The child-task inherits the environment of the parent
  923. <code>TaskTracker</code>. The user can specify additional options to the
  924. child-jvm via the <code>mapred.child.java.opts</code> configuration
  925. parameter in the <code>JobConf</code> such as non-standard paths for the
  926. run-time linker to search shared libraries via
  927. <code>-Djava.library.path=&lt;&gt;</code> etc. If the
  928. <code>mapred.child.java.opts</code> contains the symbol <em>@taskid@</em>
  929. it is interpolated with the value of the <code>taskid</code> of the map/reduce
  930. task.</p>
  931. <p>Here is an example with multiple arguments and substitutions,
  932. showing JVM GC logging, and the start of a passwordless JVM JMX agent so that
  933. jconsole and the like can connect to it to watch child memory,
  934. threads and get thread dumps. It also sets the maximum heap-size of the
  935. child jvm to 512MB and adds an additional path to the
  936. <code>java.library.path</code> of the child-jvm.</p>
  937. <p>
  938. <code>&lt;property&gt;</code><br/>
  939. &nbsp;&nbsp;<code>&lt;name&gt;mapred.child.java.opts&lt;/name&gt;</code><br/>
  940. &nbsp;&nbsp;<code>&lt;value&gt;</code><br/>
  941. &nbsp;&nbsp;&nbsp;&nbsp;<code>
  942. -Xmx512M -Djava.library.path=/home/mycompany/lib
  943. -verbose:gc -Xloggc:/tmp/@taskid@.gc</code><br/>
  944. &nbsp;&nbsp;&nbsp;&nbsp;<code>
  945. -Dcom.sun.management.jmxremote.authenticate=false
  946. -Dcom.sun.management.jmxremote.ssl=false</code><br/>
  947. &nbsp;&nbsp;<code>&lt;/value&gt;</code><br/>
  948. <code>&lt;/property&gt;</code>
  949. </p>
  950. <p>Users/admins can also specify the maximum virtual memory
  951. of the launched child-task using <code>mapred.child.ulimit</code>.</p>
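<p>For example, assuming the limit is specified in kilobytes, a value of
2097152 (about 2GB, comfortably above the 512MB heap in the example above)
could be set as follows; the value itself is illustrative:</p>
<p>
<code>&lt;property&gt;</code><br/>
&nbsp;&nbsp;<code>&lt;name&gt;mapred.child.ulimit&lt;/name&gt;</code><br/>
&nbsp;&nbsp;<code>&lt;value&gt;2097152&lt;/value&gt;</code><br/>
<code>&lt;/property&gt;</code>
</p>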
  952. <p>When the job starts, the localized job directory
  953. <code> ${mapred.local.dir}/taskTracker/jobcache/$jobid/</code>
  954. has the following directories: </p>
  955. <ul>
  956. <li> A job-specific shared directory, created at location
  957. <code>${mapred.local.dir}/taskTracker/jobcache/$jobid/work/</code>.
  958. This directory is exposed to the users through
  959. <code>job.local.dir</code>. The tasks can use this space as scratch
  960. space and share files among themselves. The directory can be accessed
  961. through the <a href="ext:api/org/apache/hadoop/mapred/jobconf/getjoblocaldir">
  962. JobConf.getJobLocalDir()</a> api. It is also available as a system property,
  963. so users can call <code>System.getProperty("job.local.dir")</code>.
  964. </li>
  965. <li>A jars directory, which has the job jar file and the expanded jar </li>
  966. <li>A job.xml file, the generic job configuration </li>
  967. <li>Each task has a directory <code>task-id</code> which again has the
  968. following structure:
  969. <ul>
  970. <li>A job.xml file, the task-localized job configuration </li>
  971. <li>A directory for intermediate output files</li>
  972. <li>The working directory of the task, which has a temporary
  973. directory for creating temporary files</li>
  975. </ul>
  976. </li>
  977. </ul>
  978. <p>The <a href="#DistributedCache">DistributedCache</a> can also be used
  979. as a rudimentary software distribution mechanism for use in the map
  980. and/or reduce tasks. It can be used to distribute both jars and
  981. native libraries. The
  982. <a href="ext:api/org/apache/hadoop/filecache/distributedcache/addarchivetoclasspath">
  983. DistributedCache.addArchiveToClassPath(Path, Configuration)</a> or
  984. <a href="ext:api/org/apache/hadoop/filecache/distributedcache/addfiletoclasspath">
  985. DistributedCache.addFileToClassPath(Path, Configuration)</a> api can
  986. be used to cache files/jars and also add them to the <em>classpath</em>
  987. of the child-jvm. Similarly, the facility provided by the
  988. <code>DistributedCache</code>, wherein it symlinks the cached files into
  989. the working directory of the task, can be used to distribute native
  990. libraries and load them. The underlying detail is that the child-jvm always
  991. has its <em>current working directory</em> added to the
  992. <code>java.library.path</code> and hence the cached libraries can be
  993. loaded via <a href="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/System.html#loadLibrary(java.lang.String)">
  994. System.loadLibrary</a> or <a href="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/System.html#load(java.lang.String)">
  995. System.load</a>.</p>
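<p>A sketch of both uses, done at job-submission time (all paths, file names
and the library name are hypothetical, and exception handling is elided):</p>
<p>
<code>// add a jar to the classpath of the child-jvm</code><br/>
<code>DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), conf);</code><br/>
<br/>
<code>// distribute a native library, symlinked into the task's working directory</code><br/>
<code>DistributedCache.createSymlink(conf);</code><br/>
<code>DistributedCache.addCacheFile(new URI("/myapp/libmynative.so#libmynative.so"), conf);</code>
</p>
<p>Inside a task the working directory is already on
<code>java.library.path</code>, so the library can then be loaded with
<code>System.loadLibrary("mynative")</code>.</p>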
  996. </section>
  997. <section>
  998. <title>Job Submission and Monitoring</title>
  999. <p><a href="ext:api/org/apache/hadoop/mapred/jobclient">
  1000. JobClient</a> is the primary interface by which user-job interacts
  1001. with the <code>JobTracker</code>.</p>
  1002. <p><code>JobClient</code> provides facilities to submit jobs, track their
  1003. progress, access component-tasks' reports/logs, get the Map-Reduce
  1004. cluster's status information and so on.</p>
  1005. <p>The job submission process involves:</p>
  1006. <ol>
  1007. <li>Checking the input and output specifications of the job.</li>
  1008. <li>Computing the <code>InputSplit</code> values for the job.</li>
  1009. <li>
  1010. Setting up the requisite accounting information for the
  1011. <code>DistributedCache</code> of the job, if necessary.
  1012. </li>
  1013. <li>
  1014. Copying the job's jar and configuration to the map-reduce system
  1015. directory on the <code>FileSystem</code>.
  1016. </li>
  1017. <li>
  1018. Submitting the job to the <code>JobTracker</code> and optionally
  1019. monitoring its status.
  1020. </li>
  1021. </ol>
  1022. <p> Job history files are also logged to the user-specified directory
  1023. <code>hadoop.job.history.user.location</code>,
  1024. which defaults to the job output directory. The files are stored in
  1025. "_logs/history/" in the specified directory. Hence, by default they
  1026. will be in <code>mapred.output.dir/_logs/history</code>. The user can stop
  1027. logging by giving the value <code>none</code> for
  1028. <code>hadoop.job.history.user.location</code>.</p>
  1029. <p> The user can view a summary of the history logs in the specified
  1030. directory using the following command: <br/>
  1031. <code>$ bin/hadoop job -history output-dir</code><br/>
  1032. This command will print job details, and failed and killed tip
  1033. details. <br/>
  1034. More details about the job, such as successful tasks and
  1035. task attempts made for each task, can be viewed using the
  1036. following command: <br/>
  1037. <code>$ bin/hadoop job -history all output-dir</code><br/></p>
  1038. <p> The user can use
  1039. <a href="ext:api/org/apache/hadoop/mapred/outputlogfilter">OutputLogFilter</a>
  1040. to filter log files from the output directory listing. </p>
  1041. <p>Normally the user creates the application, describes various facets
  1042. of the job via <code>JobConf</code>, and then uses the
  1043. <code>JobClient</code> to submit the job and monitor its progress.</p>
  1044. <section>
  1045. <title>Job Control</title>
  1046. <p>Users may need to chain map-reduce jobs to accomplish complex
  1047. tasks which cannot be done via a single map-reduce job. This is fairly
  1048. easy since the output of the job typically goes to distributed
  1049. file-system, and the output, in turn, can be used as the input for the
  1050. next job.</p>
  1051. <p>However, this also means that the onus of ensuring jobs are
  1052. complete (success/failure) lies squarely on the clients. In such
  1053. cases, the various job-control options are:</p>
  1054. <ul>
  1055. <li>
  1056. <a href="ext:api/org/apache/hadoop/mapred/jobclient/runjob">
  1057. runJob(JobConf)</a> : Submits the job and returns only after the
  1058. job has completed.
  1059. </li>
  1060. <li>
  1061. <a href="ext:api/org/apache/hadoop/mapred/jobclient/submitjob">
  1062. submitJob(JobConf)</a> : Only submits the job; the client can then poll the
  1063. returned handle to the
  1064. <a href="ext:api/org/apache/hadoop/mapred/runningjob">
  1065. RunningJob</a> to query status and make scheduling decisions
  1066. (see the sketch after this list).
  1066. </li>
  1067. <li>
  1068. <a href="ext:api/org/apache/hadoop/mapred/jobconf/setjobendnotificationuri">
  1069. JobConf.setJobEndNotificationURI(String)</a> : Sets up a
  1070. notification upon job-completion, thus avoiding polling.
  1071. </li>
  1072. </ul>
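<p>A sketch of the polling approach described above (the sleep interval and
the follow-up actions are placeholders, and exception handling is elided):</p>
<p>
<code>JobClient client = new JobClient(conf);</code><br/>
<code>RunningJob job = client.submitJob(conf);  // returns immediately</code><br/>
<br/>
<code>while (!job.isComplete()) {               // poll until the job finishes</code><br/>
&nbsp;&nbsp;<code>Thread.sleep(5000);</code><br/>
<code>}</code><br/>
<br/>
<code>if (job.isSuccessful()) {</code><br/>
&nbsp;&nbsp;<code>// submit the next job in the chain, using this job's output as its input</code><br/>
<code>} else {</code><br/>
&nbsp;&nbsp;<code>// handle failure, e.g. clean up and abort the chain</code><br/>
<code>}</code>
</p>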
  1073. </section>
  1074. </section>
  1075. <section>
  1076. <title>Job Input</title>
  1077. <p><a href="ext:api/org/apache/hadoop/mapred/inputformat">
  1078. InputFormat</a> describes the input-specification for a Map-Reduce job.
  1079. </p>
  1080. <p>The Map-Reduce framework relies on the <code>InputFormat</code> of
  1081. the job to:</p>
  1082. <ol>
  1083. <li>Validate the input-specification of the job.</li>
  1084. <li>
  1085. Split-up the input file(s) into logical <code>InputSplit</code>
  1086. instances, each of which is then assigned to an individual
  1087. <code>Mapper</code>.
  1088. </li>
  1089. <li>
  1090. Provide the <code>RecordReader</code> implementation used to
  1091. glean input records from the logical <code>InputSplit</code> for
  1092. processing by the <code>Mapper</code>.
  1093. </li>
  1094. </ol>
  1095. <p>The default behavior of file-based <code>InputFormat</code>
  1096. implementations, typically sub-classes of
  1097. <a href="ext:api/org/apache/hadoop/mapred/fileinputformat">
  1098. FileInputFormat</a>, is to split the input into <em>logical</em>
  1099. <code>InputSplit</code> instances based on the total size, in bytes, of
  1100. the input files. However, the <code>FileSystem</code> blocksize of the
  1101. input files is treated as an upper bound for input splits. A lower bound
  1102. on the split size can be set via <code>mapred.min.split.size</code>.</p>
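<p>For example, a job can raise the lower bound on the split size as
follows (the value here is illustrative):</p>
<p>
<code>// never create splits smaller than 256MB</code><br/>
<code>conf.setLong("mapred.min.split.size", 256 * 1024 * 1024);</code>
</p>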
  1103. <p>Clearly, logical splits based on input-size are insufficient for many
  1104. applications since record boundaries must be respected. In such cases,
  1105. the application should implement a <code>RecordReader</code>, which is
  1106. responsible for respecting record-boundaries and presenting a
  1107. record-oriented view of the logical <code>InputSplit</code> to the
  1108. individual task.</p>
  1109. <p><a href="ext:api/org/apache/hadoop/mapred/textinputformat">
  1110. TextInputFormat</a> is the default <code>InputFormat</code>.</p>
  1111. <p>If <code>TextInputFormat</code> is the <code>InputFormat</code> for a
  1112. given job, the framework detects input-files with the <em>.gz</em> and
  1113. <em>.lzo</em> extensions and automatically decompresses them using the
  1114. appropriate <code>CompressionCodec</code>. However, it must be noted that
  1115. compressed files with the above extensions cannot be <em>split</em> and
  1116. each compressed file is processed in its entirety by a single mapper.</p>
  1117. <section>
  1118. <title>InputSplit</title>
  1119. <p><a href="ext:api/org/apache/hadoop/mapred/inputsplit">
  1120. InputSplit</a> represents the data to be processed by an individual
  1121. <code>Mapper</code>.</p>
  1122. <p>Typically <code>InputSplit</code> presents a byte-oriented view of
  1123. the input, and it is the responsibility of <code>RecordReader</code>
  1124. to process and present a record-oriented view.</p>
  1125. <p><a href="ext:api/org/apache/hadoop/mapred/filesplit">
  1126. FileSplit</a> is the default <code>InputSplit</code>. It sets
  1127. <code>map.input.file</code> to the path of the input file for the
  1128. logical split.</p>
  1129. </section>
  1130. <section>
  1131. <title>RecordReader</title>
  1132. <p><a href="ext:api/org/apache/hadoop/mapred/recordreader">
  1133. RecordReader</a> reads <code>&lt;key, value&gt;</code> pairs from an
  1134. <code>InputSplit</code>.</p>
  1135. <p>Typically the <code>RecordReader</code> converts the byte-oriented
  1136. view of the input, provided by the <code>InputSplit</code>, and
presents a record-oriented view to the <code>Mapper</code> implementations
  1138. for processing. <code>RecordReader</code> thus assumes the
  1139. responsibility of processing record boundaries and presents the tasks
  1140. with keys and values.</p>
  1141. </section>
  1142. </section>
  1143. <section>
  1144. <title>Job Output</title>
  1145. <p><a href="ext:api/org/apache/hadoop/mapred/outputformat">
  1146. OutputFormat</a> describes the output-specification for a Map-Reduce
  1147. job.</p>
  1148. <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of
  1149. the job to:</p>
  1150. <ol>
  1151. <li>
  1152. Validate the output-specification of the job; for example, check that
  1153. the output directory doesn't already exist.
  1154. </li>
  1155. <li>
  1156. Provide the <code>RecordWriter</code> implementation used to
  1157. write the output files of the job. Output files are stored in a
  1158. <code>FileSystem</code>.
  1159. </li>
  1160. </ol>
  1161. <p><code>TextOutputFormat</code> is the default
  1162. <code>OutputFormat</code>.</p>
  1163. <section>
  1164. <title>Task Side-Effect Files</title>
  1165. <p>In some applications, component tasks need to create and/or write to
  1166. side-files, which differ from the actual job-output files.</p>
  1167. <p>In such cases there could be issues with two instances of the same
  1168. <code>Mapper</code> or <code>Reducer</code> running simultaneously (for
  1169. example, speculative tasks) trying to open and/or write to the same
  1170. file (path) on the <code>FileSystem</code>. Hence the
  1171. application-writer will have to pick unique names per task-attempt
  1172. (using the attemptid, say <code>attempt_200709221812_0001_m_000000_0</code>),
  1173. not just per task.</p>
  1174. <p>To avoid these issues the Map-Reduce framework maintains a special
  1175. <code>${mapred.output.dir}/_temporary/_${taskid}</code> sub-directory
  1176. accessible via <code>${mapred.work.output.dir}</code>
  1177. for each task-attempt on the <code>FileSystem</code> where the output
  1178. of the task-attempt is stored. On successful completion of the
  1179. task-attempt, the files in the
  1180. <code>${mapred.output.dir}/_temporary/_${taskid}</code> (only)
  1181. are <em>promoted</em> to <code>${mapred.output.dir}</code>. Of course,
  1182. the framework discards the sub-directory of unsuccessful task-attempts.
  1183. This process is completely transparent to the application.</p>
  1184. <p>The application-writer can take advantage of this feature by
  1185. creating any side-files required in <code>${mapred.work.output.dir}</code>
  1186. during execution of a task via
  1187. <a href="ext:api/org/apache/hadoop/mapred/fileoutputformat/getworkoutputpath">
  1188. FileOutputFormat.getWorkOutputPath()</a>, and the framework will promote them
similarly for successful task-attempts, thus eliminating the need to
  1190. pick unique paths per task-attempt.</p>
  1191. <p>Note: The value of <code>${mapred.work.output.dir}</code> during
  1192. execution of a particular task-attempt is actually
<code>${mapred.output.dir}/_temporary/_${taskid}</code>, and this value is
  1194. set by the map-reduce framework. So, just create any side-files in the
  1195. path returned by
  1196. <a href="ext:api/org/apache/hadoop/mapred/fileoutputformat/getworkoutputpath">
FileOutputFormat.getWorkOutputPath()</a> from a map/reduce
task to take advantage of this feature.</p>
<p>The entire discussion holds true for maps of jobs with
reducer=NONE (i.e. 0 reduces), since the output of the map, in that case,
goes directly to HDFS.</p>
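<p>For example, a task could create a side-file as follows (a minimal
sketch; the file name <code>side-data.txt</code> is purely illustrative):</p>
<p>
<code>Path workDir = FileOutputFormat.getWorkOutputPath(job);</code><br/>
<code>FileSystem fs = workDir.getFileSystem(job);</code><br/>
<code>FSDataOutputStream side = fs.create(new Path(workDir, "side-data.txt"));</code><br/>
<code>// ... write to the side-file; it is promoted to ${mapred.output.dir}</code><br/>
<code>// only if the task-attempt succeeds</code><br/>
<code>side.close();</code>
</p>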
  1202. </section>
  1203. <section>
  1204. <title>RecordWriter</title>
  1205. <p><a href="ext:api/org/apache/hadoop/mapred/recordwriter">
  1206. RecordWriter</a> writes the output <code>&lt;key, value&gt;</code>
  1207. pairs to an output file.</p>
  1208. <p>RecordWriter implementations write the job outputs to the
  1209. <code>FileSystem</code>.</p>
  1210. </section>
  1211. </section>
  1212. <section>
  1213. <title>Other Useful Features</title>
  1214. <section>
  1215. <title>Counters</title>
  1216. <p><code>Counters</code> represent global counters, defined either by
  1217. the Map-Reduce framework or applications. Each <code>Counter</code> can
  1218. be of any <code>Enum</code> type. Counters of a particular
  1219. <code>Enum</code> are bunched into groups of type
  1220. <code>Counters.Group</code>.</p>
  1221. <p>Applications can define arbitrary <code>Counters</code> (of type
  1222. <code>Enum</code>) and update them via
  1223. <a href="ext:api/org/apache/hadoop/mapred/reporter/incrcounter">
  1224. Reporter.incrCounter(Enum, long)</a> in the <code>map</code> and/or
  1225. <code>reduce</code> methods. These counters are then globally
  1226. aggregated by the framework.</p>
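<p>A minimal sketch (the enum name and the increment are illustrative;
the complete <code>WordCount</code> v2.0 example below uses the same
pattern):</p>
<p>
<code>static enum MyCounters { NUM_RECORDS }</code><br/>
<code>// inside map() or reduce():</code><br/>
<code>reporter.incrCounter(MyCounters.NUM_RECORDS, 1);</code>
</p>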
  1227. </section>
  1228. <section>
  1229. <title>DistributedCache</title>
  1230. <p><a href="ext:api/org/apache/hadoop/filecache/distributedcache">
  1231. DistributedCache</a> distributes application-specific, large, read-only
  1232. files efficiently.</p>
  1233. <p><code>DistributedCache</code> is a facility provided by the
  1234. Map-Reduce framework to cache files (text, archives, jars and so on)
  1235. needed by applications.</p>
  1236. <p>Applications specify the files to be cached via urls (hdfs:// or
  1237. http://) in the <code>JobConf</code>. The <code>DistributedCache</code>
  1238. assumes that the files specified via hdfs:// urls are already present
  1239. on the <code>FileSystem</code>.</p>
  1240. <p>The framework will copy the necessary files to the slave node
  1241. before any tasks for the job are executed on that node. Its
efficiency stems from the fact that the files are only copied once
per job, and from the ability to cache archives, which are un-archived on
the slaves.</p>
  1245. <p><code>DistributedCache</code> tracks the modification timestamps of
  1246. the cached files. Clearly the cache files should not be modified by
  1247. the application or externally while the job is executing.</p>
  1248. <p><code>DistributedCache</code> can be used to distribute simple,
  1249. read-only data/text files and more complex types such as archives and
  1250. jars. Archives (zip, tar, tgz and tar.gz files) are
  1251. <em>un-archived</em> at the slave nodes.
  1252. Optionally users can also direct the <code>DistributedCache</code> to
  1253. <em>symlink</em> the cached file(s) into the <code>current working
  1254. directory</code> of the task via the
  1255. <a href="ext:api/org/apache/hadoop/filecache/distributedcache/createsymlink">
  1256. DistributedCache.createSymlink(Configuration)</a> api. Files
  1257. have <em>execution permissions</em> set.</p>
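<p>A brief sketch of typical usage when setting up the job (the paths
and file names are purely illustrative; the files are assumed to be
already present in HDFS):</p>
<p>
<code>DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"), conf);</code><br/>
<code>DistributedCache.addCacheArchive(new URI("/myapp/dictionary.zip"), conf);</code><br/>
<code>DistributedCache.createSymlink(conf); // symlink cached files into the task's cwd</code>
</p>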
  1258. </section>
  1259. <section>
  1260. <title>Tool</title>
  1261. <p>The <a href="ext:api/org/apache/hadoop/util/tool">Tool</a>
  1262. interface supports the handling of generic Hadoop command-line options.
  1263. </p>
  1264. <p><code>Tool</code> is the standard for any Map-Reduce tool or
  1265. application. The application should delegate the handling of
  1266. standard command-line options to
  1267. <a href="ext:api/org/apache/hadoop/util/genericoptionsparser">
  1268. GenericOptionsParser</a> via
  1269. <a href="ext:api/org/apache/hadoop/util/toolrunner/run">
  1270. ToolRunner.run(Tool, String[])</a> and only handle its custom
  1271. arguments.</p>
  1272. <p>
  1273. The generic Hadoop command-line options are:<br/>
  1274. <code>
  1275. -conf &lt;configuration file&gt;
  1276. </code>
  1277. <br/>
  1278. <code>
  1279. -D &lt;property=value&gt;
  1280. </code>
  1281. <br/>
  1282. <code>
  1283. -fs &lt;local|namenode:port&gt;
  1284. </code>
  1285. <br/>
  1286. <code>
  1287. -jt &lt;local|jobtracker:port&gt;
  1288. </code>
  1289. </p>
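<p>A minimal skeleton of such an application (the class name
<code>MyTool</code> is illustrative and imports are omitted for
brevity; see the complete <code>WordCount</code> v2.0 example below):</p>
<p>
<code>public class MyTool extends Configured implements Tool {</code><br/>
<code>&nbsp;&nbsp;public int run(String[] args) throws Exception {</code><br/>
<code>&nbsp;&nbsp;&nbsp;&nbsp;// generic options have already been consumed by ToolRunner;</code><br/>
<code>&nbsp;&nbsp;&nbsp;&nbsp;// handle only the application's custom arguments here</code><br/>
<code>&nbsp;&nbsp;&nbsp;&nbsp;return 0;</code><br/>
<code>&nbsp;&nbsp;}</code><br/>
<code>&nbsp;&nbsp;public static void main(String[] args) throws Exception {</code><br/>
<code>&nbsp;&nbsp;&nbsp;&nbsp;System.exit(ToolRunner.run(new Configuration(), new MyTool(), args));</code><br/>
<code>&nbsp;&nbsp;}</code><br/>
<code>}</code>
</p>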
  1290. </section>
  1291. <section>
  1292. <title>IsolationRunner</title>
  1293. <p><a href="ext:api/org/apache/hadoop/mapred/isolationrunner">
  1294. IsolationRunner</a> is a utility to help debug Map-Reduce programs.</p>
  1295. <p>To use the <code>IsolationRunner</code>, first set
  1296. <code>keep.failed.tasks.files</code> to <code>true</code>
  1297. (also see <code>keep.tasks.files.pattern</code>).</p>
  1298. <p>
  1299. Next, go to the node on which the failed task ran and go to the
  1300. <code>TaskTracker</code>'s local directory and run the
  1301. <code>IsolationRunner</code>:<br/>
  1302. <code>$ cd &lt;local path&gt;/taskTracker/${taskid}/work</code><br/>
  1303. <code>
  1304. $ bin/hadoop org.apache.hadoop.mapred.IsolationRunner ../job.xml
  1305. </code>
  1306. </p>
<p><code>IsolationRunner</code> will run the failed task in a single
jvm, which can be run under a debugger, over precisely the same input.</p>
  1309. </section>
  1310. <section>
  1311. <title>Debugging</title>
<p>The Map/Reduce framework provides a facility to run user-provided
scripts for debugging. When a map/reduce task fails, the user can run
a script to do post-processing on the task's logs, i.e. the task's stdout,
stderr, syslog and jobconf. The stdout and stderr of the
user-provided debug script are printed on the diagnostics.
These outputs are also displayed on the job UI on demand.</p>
<p>In the following sections we discuss how to submit a debug script
along with the job. To submit the debug script, it first has to be
distributed. Then the script has to be supplied in the configuration.</p>
  1321. <section>
  1322. <title> How to distribute script file: </title>
  1323. <p>
  1324. To distribute the debug script file, first copy the file to the dfs.
  1325. The file can be distributed by setting the property
  1326. "mapred.cache.files" with value "path"#"script-name".
  1327. If more than one file has to be distributed, the files can be added
  1328. as comma separated paths. This property can also be set by APIs
  1329. <a href="ext:api/org/apache/hadoop/filecache/distributedcache/addcachefile">
  1330. DistributedCache.addCacheFile(URI,conf) </a> and
  1331. <a href="ext:api/org/apache/hadoop/filecache/distributedcache/setcachefiles">
  1332. DistributedCache.setCacheFiles(URIs,conf) </a> where URI is of
  1333. the form "hdfs://host:port/'absolutepath'#'script-name'".
  1334. For Streaming, the file can be added through
  1335. command line option -cacheFile.
  1336. </p>
<p>
The files have to be symlinked into the current working directory
of the task. To create a symlink for a file, set the property
"mapred.create.symlink" to "yes". This can also be done via the
<a href="ext:api/org/apache/hadoop/filecache/distributedcache/createsymlink">
DistributedCache.createSymlink(Configuration)</a> api.
</p>
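<p>A sketch of the corresponding configuration, using the properties
described above (the host, port, path and script name are purely
illustrative):</p>
<p>
<code>conf.set("mapred.cache.files", "hdfs://host:port/myapp/debug-script#debug-script");</code><br/>
<code>conf.set("mapred.create.symlink", "yes");</code>
</p>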
  1344. </section>
  1345. <section>
  1346. <title> How to submit script: </title>
<p>A quick way to submit a debug script is to set values for the
properties "mapred.map.task.debug.script" and
"mapred.reduce.task.debug.script" for debugging the map and reduce
tasks respectively. These properties can also be set by using the APIs
<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmapdebugscript">
JobConf.setMapDebugScript(String)</a> and
<a href="ext:api/org/apache/hadoop/mapred/jobconf/setreducedebugscript">
JobConf.setReduceDebugScript(String)</a>. For Streaming, the debug
script can be submitted with the command-line options -mapdebug and
-reducedebug for debugging the mapper and reducer respectively.</p>
<p>The arguments to the script are the task's stdout, stderr,
  1358. syslog and jobconf files. The debug command, run on the node where
  1359. the map/reduce failed, is: <br/>
  1360. <code> $script $stdout $stderr $syslog $jobconf </code> </p>
<p>Pipes programs have the C++ program name as a fifth argument
to the command. Thus for pipes programs the command is <br/>
  1363. <code>$script $stdout $stderr $syslog $jobconf $program </code>
  1364. </p>
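<p>For example, assuming the script has already been distributed and
symlinked into the task's working directory as <code>debug-script</code>
(an illustrative name), the job could be configured as:</p>
<p>
<code>conf.setMapDebugScript("./debug-script");</code><br/>
<code>conf.setReduceDebugScript("./debug-script");</code>
</p>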
  1365. </section>
  1366. <section>
  1367. <title> Default Behavior: </title>
<p>For pipes, a default script is run to process core dumps under
gdb; it prints the stack trace and gives info about running threads.</p>
  1370. </section>
  1371. </section>
  1372. <section>
  1373. <title>JobControl</title>
  1374. <p><a href="ext:api/org/apache/hadoop/mapred/jobcontrol/package-summary">
  1375. JobControl</a> is a utility which encapsulates a set of Map-Reduce jobs
  1376. and their dependencies.</p>
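<p>A brief sketch of chaining two dependent jobs with the
<code>org.apache.hadoop.mapred.jobcontrol</code> <code>Job</code> and
<code>JobControl</code> classes (the variable names, group name and
polling interval are illustrative):</p>
<p>
<code>Job first = new Job(firstConf);</code><br/>
<code>Job second = new Job(secondConf);</code><br/>
<code>second.addDependingJob(first); // second runs only after first succeeds</code><br/>
<code>JobControl control = new JobControl("chain");</code><br/>
<code>control.addJob(first);</code><br/>
<code>control.addJob(second);</code><br/>
<code>new Thread(control).start();</code><br/>
<code>while (!control.allFinished()) { Thread.sleep(5000); }</code>
</p>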
  1377. </section>
  1378. <section>
  1379. <title>Data Compression</title>
  1380. <p>Hadoop Map-Reduce provides facilities for the application-writer to
  1381. specify compression for both intermediate map-outputs and the
  1382. job-outputs i.e. output of the reduces. It also comes bundled with
  1383. <a href="ext:api/org/apache/hadoop/io/compress/compressioncodec">
  1384. CompressionCodec</a> implementations for the
  1385. <a href="ext:zlib">zlib</a> and <a href="ext:lzo">lzo</a> compression
  1386. algorithms. The <a href="ext:gzip">gzip</a> file format is also
  1387. supported.</p>
  1388. <p>Hadoop also provides native implementations of the above compression
  1389. codecs for reasons of both performance (zlib) and non-availability of
  1390. Java libraries (lzo). More details on their usage and availability are
  1391. available <a href="native_libraries.html">here</a>.</p>
  1392. <section>
  1393. <title>Intermediate Outputs</title>
  1394. <p>Applications can control compression of intermediate map-outputs
  1395. via the
  1396. <a href="ext:api/org/apache/hadoop/mapred/jobconf/setcompressmapoutput">
  1397. JobConf.setCompressMapOutput(boolean)</a> api and the
  1398. <code>CompressionCodec</code> to be used via the
  1399. <a href="ext:api/org/apache/hadoop/mapred/jobconf/setmapoutputcompressorclass">
  1400. JobConf.setMapOutputCompressorClass(Class)</a> api. Since
  1401. the intermediate map-outputs are always stored in the
  1402. <a href="ext:api/org/apache/hadoop/io/sequencefile">SequenceFile</a>
  1403. format, the
  1404. <a href="ext:api/org/apache/hadoop/io/sequencefilecompressiontype">
  1405. SequenceFile.CompressionType</a> (i.e.
  1406. <a href="ext:api/org/apache/hadoop/io/sequencefilecompressiontype/record">
  1407. RECORD</a> /
  1408. <a href="ext:api/org/apache/hadoop/io/sequencefilecompressiontype/block">
  1409. BLOCK</a> - defaults to <code>RECORD</code>) can be specified via the
  1410. <a href="ext:api/org/apache/hadoop/mapred/jobconf/setmapoutputcompressiontype">
  1411. JobConf.setMapOutputCompressionType(SequenceFile.CompressionType)</a>
  1412. api.</p>
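<p>For example (the choice of codec and compression type here is
illustrative):</p>
<p>
<code>conf.setCompressMapOutput(true);</code><br/>
<code>conf.setMapOutputCompressorClass(GzipCodec.class);</code><br/>
<code>conf.setMapOutputCompressionType(SequenceFile.CompressionType.BLOCK);</code>
</p>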
  1413. </section>
  1414. <section>
  1415. <title>Job Outputs</title>
  1416. <p>Applications can control compression of job-outputs via the
  1417. <a href="ext:api/org/apache/hadoop/mapred/outputformatbase/setcompressoutput">
  1418. OutputFormatBase.setCompressOutput(JobConf, boolean)</a> api and the
  1419. <code>CompressionCodec</code> to be used can be specified via the
  1420. <a href="ext:api/org/apache/hadoop/mapred/outputformatbase/setoutputcompressorclass">
  1421. OutputFormatBase.setOutputCompressorClass(JobConf, Class)</a> api.</p>
  1422. <p>If the job outputs are to be stored in the
  1423. <a href="ext:api/org/apache/hadoop/mapred/sequencefileoutputformat">
  1424. SequenceFileOutputFormat</a>, the required
  1425. <code>SequenceFile.CompressionType</code> (i.e. <code>RECORD</code> /
<code>BLOCK</code> - defaults to <code>RECORD</code>) can be specified
  1427. via the
  1428. <a href="ext:api/org/apache/hadoop/mapred/sequencefileoutputformat/setoutputcompressiontype">
  1429. SequenceFileOutputFormat.setOutputCompressionType(JobConf,
  1430. SequenceFile.CompressionType)</a> api.</p>
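<p>For example (again, the codec and type are illustrative; the last
line applies only when the job uses
<code>SequenceFileOutputFormat</code>):</p>
<p>
<code>OutputFormatBase.setCompressOutput(conf, true);</code><br/>
<code>OutputFormatBase.setOutputCompressorClass(conf, GzipCodec.class);</code><br/>
<code>SequenceFileOutputFormat.setOutputCompressionType(conf, SequenceFile.CompressionType.BLOCK);</code>
</p>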
  1431. </section>
  1432. </section>
  1433. </section>
  1434. </section>
  1435. <section>
  1436. <title>Example: WordCount v2.0</title>
<p>Here is a more complete <code>WordCount</code> which uses many of the
features provided by the Map-Reduce framework that we have discussed so far.</p>
<p>This example needs HDFS to be up and running, especially for the
<code>DistributedCache</code>-related features. Hence it only works with a
  1441. <a href="quickstart.html#SingleNodeSetup">pseudo-distributed</a> or
  1442. <a href="quickstart.html#Fully-Distributed+Operation">fully-distributed</a>
  1443. Hadoop installation.</p>
  1444. <section>
  1445. <title>Source Code</title>
  1446. <table>
  1447. <tr>
  1448. <th></th>
  1449. <th>WordCount.java</th>
  1450. </tr>
  1451. <tr>
  1452. <td>1.</td>
  1453. <td>
  1454. <code>package org.myorg;</code>
  1455. </td>
  1456. </tr>
  1457. <tr>
  1458. <td>2.</td>
  1459. <td></td>
  1460. </tr>
  1461. <tr>
  1462. <td>3.</td>
  1463. <td>
  1464. <code>import java.io.*;</code>
  1465. </td>
  1466. </tr>
  1467. <tr>
  1468. <td>4.</td>
  1469. <td>
  1470. <code>import java.util.*;</code>
  1471. </td>
  1472. </tr>
  1473. <tr>
  1474. <td>5.</td>
  1475. <td></td>
  1476. </tr>
  1477. <tr>
  1478. <td>6.</td>
  1479. <td>
  1480. <code>import org.apache.hadoop.fs.Path;</code>
  1481. </td>
  1482. </tr>
  1483. <tr>
  1484. <td>7.</td>
  1485. <td>
  1486. <code>import org.apache.hadoop.filecache.DistributedCache;</code>
  1487. </td>
  1488. </tr>
  1489. <tr>
  1490. <td>8.</td>
  1491. <td>
  1492. <code>import org.apache.hadoop.conf.*;</code>
  1493. </td>
  1494. </tr>
  1495. <tr>
  1496. <td>9.</td>
  1497. <td>
  1498. <code>import org.apache.hadoop.io.*;</code>
  1499. </td>
  1500. </tr>
  1501. <tr>
  1502. <td>10.</td>
  1503. <td>
  1504. <code>import org.apache.hadoop.mapred.*;</code>
  1505. </td>
  1506. </tr>
  1507. <tr>
  1508. <td>11.</td>
  1509. <td>
  1510. <code>import org.apache.hadoop.util.*;</code>
  1511. </td>
  1512. </tr>
  1513. <tr>
  1514. <td>12.</td>
  1515. <td></td>
  1516. </tr>
  1517. <tr>
  1518. <td>13.</td>
  1519. <td>
  1520. <code>public class WordCount extends Configured implements Tool {</code>
  1521. </td>
  1522. </tr>
  1523. <tr>
  1524. <td>14.</td>
  1525. <td></td>
  1526. </tr>
  1527. <tr>
  1528. <td>15.</td>
  1529. <td>
  1530. &nbsp;&nbsp;
  1531. <code>
  1532. public static class Map extends MapReduceBase
  1533. implements Mapper&lt;LongWritable, Text, Text, IntWritable&gt; {
  1534. </code>
  1535. </td>
  1536. </tr>
  1537. <tr>
  1538. <td>16.</td>
  1539. <td></td>
  1540. </tr>
  1541. <tr>
  1542. <td>17.</td>
  1543. <td>
  1544. &nbsp;&nbsp;&nbsp;&nbsp;
  1545. <code>
  1546. static enum Counters { INPUT_WORDS }
  1547. </code>
  1548. </td>
  1549. </tr>
  1550. <tr>
  1551. <td>18.</td>
  1552. <td></td>
  1553. </tr>
  1554. <tr>
  1555. <td>19.</td>
  1556. <td>
  1557. &nbsp;&nbsp;&nbsp;&nbsp;
  1558. <code>
  1559. private final static IntWritable one = new IntWritable(1);
  1560. </code>
  1561. </td>
  1562. </tr>
  1563. <tr>
  1564. <td>20.</td>
  1565. <td>
  1566. &nbsp;&nbsp;&nbsp;&nbsp;
  1567. <code>private Text word = new Text();</code>
  1568. </td>
  1569. </tr>
  1570. <tr>
  1571. <td>21.</td>
  1572. <td></td>
  1573. </tr>
  1574. <tr>
  1575. <td>22.</td>
  1576. <td>
  1577. &nbsp;&nbsp;&nbsp;&nbsp;
  1578. <code>private boolean caseSensitive = true;</code>
  1579. </td>
  1580. </tr>
  1581. <tr>
  1582. <td>23.</td>
  1583. <td>
  1584. &nbsp;&nbsp;&nbsp;&nbsp;
  1585. <code>private Set&lt;String&gt; patternsToSkip = new HashSet&lt;String&gt;();</code>
  1586. </td>
  1587. </tr>
  1588. <tr>
  1589. <td>24.</td>
  1590. <td></td>
  1591. </tr>
  1592. <tr>
  1593. <td>25.</td>
  1594. <td>
  1595. &nbsp;&nbsp;&nbsp;&nbsp;
  1596. <code>private long numRecords = 0;</code>
  1597. </td>
  1598. </tr>
  1599. <tr>
  1600. <td>26.</td>
  1601. <td>
  1602. &nbsp;&nbsp;&nbsp;&nbsp;
  1603. <code>private String inputFile;</code>
  1604. </td>
  1605. </tr>
  1606. <tr>
  1607. <td>27.</td>
  1608. <td></td>
  1609. </tr>
  1610. <tr>
  1611. <td>28.</td>
  1612. <td>
  1613. &nbsp;&nbsp;&nbsp;&nbsp;
  1614. <code>public void configure(JobConf job) {</code>
  1615. </td>
  1616. </tr>
  1617. <tr>
  1618. <td>29.</td>
  1619. <td>
  1620. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1621. <code>
  1622. caseSensitive = job.getBoolean("wordcount.case.sensitive", true);
  1623. </code>
  1624. </td>
  1625. </tr>
  1626. <tr>
  1627. <td>30.</td>
  1628. <td>
  1629. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1630. <code>inputFile = job.get("map.input.file");</code>
  1631. </td>
  1632. </tr>
  1633. <tr>
  1634. <td>31.</td>
  1635. <td></td>
  1636. </tr>
  1637. <tr>
  1638. <td>32.</td>
  1639. <td>
  1640. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1641. <code>if (job.getBoolean("wordcount.skip.patterns", false)) {</code>
  1642. </td>
  1643. </tr>
  1644. <tr>
  1645. <td>33.</td>
  1646. <td>
  1647. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1648. <code>Path[] patternsFiles = new Path[0];</code>
  1649. </td>
  1650. </tr>
  1651. <tr>
  1652. <td>34.</td>
  1653. <td>
  1654. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1655. <code>try {</code>
  1656. </td>
  1657. </tr>
  1658. <tr>
  1659. <td>35.</td>
  1660. <td>
  1661. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1662. <code>
  1663. patternsFiles = DistributedCache.getLocalCacheFiles(job);
  1664. </code>
  1665. </td>
  1666. </tr>
  1667. <tr>
  1668. <td>36.</td>
  1669. <td>
  1670. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1671. <code>} catch (IOException ioe) {</code>
  1672. </td>
  1673. </tr>
  1674. <tr>
  1675. <td>37.</td>
  1676. <td>
  1677. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1678. <code>
  1679. System.err.println("Caught exception while getting cached files: "
  1680. + StringUtils.stringifyException(ioe));
  1681. </code>
  1682. </td>
  1683. </tr>
  1684. <tr>
  1685. <td>38.</td>
  1686. <td>
  1687. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1688. <code>}</code>
  1689. </td>
  1690. </tr>
  1691. <tr>
  1692. <td>39.</td>
  1693. <td>
  1694. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1695. <code>for (Path patternsFile : patternsFiles) {</code>
  1696. </td>
  1697. </tr>
  1698. <tr>
  1699. <td>40.</td>
  1700. <td>
  1701. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1702. <code>parseSkipFile(patternsFile);</code>
  1703. </td>
  1704. </tr>
  1705. <tr>
  1706. <td>41.</td>
  1707. <td>
  1708. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1709. <code>}</code>
  1710. </td>
  1711. </tr>
  1712. <tr>
  1713. <td>42.</td>
  1714. <td>
  1715. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1716. <code>}</code>
  1717. </td>
  1718. </tr>
  1719. <tr>
  1720. <td>43.</td>
  1721. <td>
  1722. &nbsp;&nbsp;&nbsp;&nbsp;
  1723. <code>}</code>
  1724. </td>
  1725. </tr>
  1726. <tr>
  1727. <td>44.</td>
  1728. <td></td>
  1729. </tr>
  1730. <tr>
  1731. <td>45.</td>
  1732. <td>
  1733. &nbsp;&nbsp;&nbsp;&nbsp;
  1734. <code>private void parseSkipFile(Path patternsFile) {</code>
  1735. </td>
  1736. </tr>
  1737. <tr>
  1738. <td>46.</td>
  1739. <td>
  1740. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1741. <code>try {</code>
  1742. </td>
  1743. </tr>
  1744. <tr>
  1745. <td>47.</td>
  1746. <td>
  1747. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1748. <code>
  1749. BufferedReader fis =
  1750. new BufferedReader(new FileReader(patternsFile.toString()));
  1751. </code>
  1752. </td>
  1753. </tr>
  1754. <tr>
  1755. <td>48.</td>
  1756. <td>
  1757. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1758. <code>String pattern = null;</code>
  1759. </td>
  1760. </tr>
  1761. <tr>
  1762. <td>49.</td>
  1763. <td>
  1764. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1765. <code>while ((pattern = fis.readLine()) != null) {</code>
  1766. </td>
  1767. </tr>
  1768. <tr>
  1769. <td>50.</td>
  1770. <td>
  1771. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1772. <code>patternsToSkip.add(pattern);</code>
  1773. </td>
  1774. </tr>
  1775. <tr>
  1776. <td>51.</td>
  1777. <td>
  1778. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1779. <code>}</code>
  1780. </td>
  1781. </tr>
  1782. <tr>
  1783. <td>52.</td>
  1784. <td>
  1785. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1786. <code>} catch (IOException ioe) {</code>
  1787. </td>
  1788. </tr>
  1789. <tr>
  1790. <td>53.</td>
  1791. <td>
  1792. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1793. <code>
  1794. System.err.println("Caught exception while parsing the cached file '" +
  1795. patternsFile + "' : " +
  1796. StringUtils.stringifyException(ioe));
  1797. </code>
  1798. </td>
  1799. </tr>
  1800. <tr>
  1801. <td>54.</td>
  1802. <td>
  1803. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1804. <code>}</code>
  1805. </td>
  1806. </tr>
  1807. <tr>
  1808. <td>55.</td>
  1809. <td>
  1810. &nbsp;&nbsp;&nbsp;&nbsp;
  1811. <code>}</code>
  1812. </td>
  1813. </tr>
  1814. <tr>
  1815. <td>56.</td>
  1816. <td></td>
  1817. </tr>
  1818. <tr>
  1819. <td>57.</td>
  1820. <td>
  1821. &nbsp;&nbsp;&nbsp;&nbsp;
  1822. <code>
  1823. public void map(LongWritable key, Text value,
  1824. OutputCollector&lt;Text, IntWritable&gt; output,
  1825. Reporter reporter) throws IOException {
  1826. </code>
  1827. </td>
  1828. </tr>
  1829. <tr>
  1830. <td>58.</td>
  1831. <td>
  1832. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1833. <code>
  1834. String line =
  1835. (caseSensitive) ? value.toString() :
  1836. value.toString().toLowerCase();
  1837. </code>
  1838. </td>
  1839. </tr>
  1840. <tr>
  1841. <td>59.</td>
  1842. <td></td>
  1843. </tr>
  1844. <tr>
  1845. <td>60.</td>
  1846. <td>
  1847. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1848. <code>for (String pattern : patternsToSkip) {</code>
  1849. </td>
  1850. </tr>
  1851. <tr>
  1852. <td>61.</td>
  1853. <td>
  1854. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1855. <code>line = line.replaceAll(pattern, "");</code>
  1856. </td>
  1857. </tr>
  1858. <tr>
  1859. <td>62.</td>
  1860. <td>
  1861. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1862. <code>}</code>
  1863. </td>
  1864. </tr>
  1865. <tr>
  1866. <td>63.</td>
  1867. <td></td>
  1868. </tr>
  1869. <tr>
  1870. <td>64.</td>
  1871. <td>
  1872. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1873. <code>StringTokenizer tokenizer = new StringTokenizer(line);</code>
  1874. </td>
  1875. </tr>
  1876. <tr>
  1877. <td>65.</td>
  1878. <td>
  1879. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1880. <code>while (tokenizer.hasMoreTokens()) {</code>
  1881. </td>
  1882. </tr>
  1883. <tr>
  1884. <td>66.</td>
  1885. <td>
  1886. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1887. <code>word.set(tokenizer.nextToken());</code>
  1888. </td>
  1889. </tr>
  1890. <tr>
  1891. <td>67.</td>
  1892. <td>
  1893. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1894. <code>output.collect(word, one);</code>
  1895. </td>
  1896. </tr>
  1897. <tr>
  1898. <td>68.</td>
  1899. <td>
  1900. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1901. <code>reporter.incrCounter(Counters.INPUT_WORDS, 1);</code>
  1902. </td>
  1903. </tr>
  1904. <tr>
  1905. <td>69.</td>
  1906. <td>
  1907. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1908. <code>}</code>
  1909. </td>
  1910. </tr>
  1911. <tr>
  1912. <td>70.</td>
  1913. <td></td>
  1914. </tr>
  1915. <tr>
  1916. <td>71.</td>
  1917. <td>
  1918. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1919. <code>if ((++numRecords % 100) == 0) {</code>
  1920. </td>
  1921. </tr>
  1922. <tr>
  1923. <td>72.</td>
  1924. <td>
  1925. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1926. <code>
  1927. reporter.setStatus("Finished processing " + numRecords +
  1928. " records " + "from the input file: " +
  1929. inputFile);
  1930. </code>
  1931. </td>
  1932. </tr>
  1933. <tr>
  1934. <td>73.</td>
  1935. <td>
  1936. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1937. <code>}</code>
  1938. </td>
  1939. </tr>
  1940. <tr>
  1941. <td>74.</td>
  1942. <td>
  1943. &nbsp;&nbsp;&nbsp;&nbsp;
  1944. <code>}</code>
  1945. </td>
  1946. </tr>
  1947. <tr>
  1948. <td>75.</td>
  1949. <td>
  1950. &nbsp;&nbsp;
  1951. <code>}</code>
  1952. </td>
  1953. </tr>
  1954. <tr>
  1955. <td>76.</td>
  1956. <td></td>
  1957. </tr>
  1958. <tr>
  1959. <td>77.</td>
  1960. <td>
  1961. &nbsp;&nbsp;
  1962. <code>
  1963. public static class Reduce extends MapReduceBase implements
  1964. Reducer&lt;Text, IntWritable, Text, IntWritable&gt; {
  1965. </code>
  1966. </td>
  1967. </tr>
  1968. <tr>
  1969. <td>78.</td>
  1970. <td>
  1971. &nbsp;&nbsp;&nbsp;&nbsp;
  1972. <code>
  1973. public void reduce(Text key, Iterator&lt;IntWritable&gt; values,
  1974. OutputCollector&lt;Text, IntWritable&gt; output,
  1975. Reporter reporter) throws IOException {
  1976. </code>
  1977. </td>
  1978. </tr>
  1979. <tr>
  1980. <td>79.</td>
  1981. <td>
  1982. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1983. <code>int sum = 0;</code>
  1984. </td>
  1985. </tr>
  1986. <tr>
  1987. <td>80.</td>
  1988. <td>
  1989. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1990. <code>while (values.hasNext()) {</code>
  1991. </td>
  1992. </tr>
  1993. <tr>
  1994. <td>81.</td>
  1995. <td>
  1996. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  1997. <code>sum += values.next().get();</code>
  1998. </td>
  1999. </tr>
  2000. <tr>
  2001. <td>82.</td>
  2002. <td>
  2003. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  2004. <code>}</code>
  2005. </td>
  2006. </tr>
  2007. <tr>
  2008. <td>83.</td>
  2009. <td>
  2010. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  2011. <code>output.collect(key, new IntWritable(sum));</code>
  2012. </td>
  2013. </tr>
  2014. <tr>
  2015. <td>84.</td>
  2016. <td>
  2017. &nbsp;&nbsp;&nbsp;&nbsp;
  2018. <code>}</code>
  2019. </td>
  2020. </tr>
  2021. <tr>
  2022. <td>85.</td>
  2023. <td>
  2024. &nbsp;&nbsp;
  2025. <code>}</code>
  2026. </td>
  2027. </tr>
  2028. <tr>
  2029. <td>86.</td>
  2030. <td></td>
  2031. </tr>
  2032. <tr>
  2033. <td>87.</td>
  2034. <td>
  2035. &nbsp;&nbsp;
  2036. <code>public int run(String[] args) throws Exception {</code>
  2037. </td>
  2038. </tr>
  2039. <tr>
  2040. <td>88.</td>
  2041. <td>
  2042. &nbsp;&nbsp;&nbsp;&nbsp;
  2043. <code>
  2044. JobConf conf = new JobConf(getConf(), WordCount.class);
  2045. </code>
  2046. </td>
  2047. </tr>
  2048. <tr>
  2049. <td>89.</td>
  2050. <td>
  2051. &nbsp;&nbsp;&nbsp;&nbsp;
  2052. <code>conf.setJobName("wordcount");</code>
  2053. </td>
  2054. </tr>
  2055. <tr>
  2056. <td>90.</td>
  2057. <td></td>
  2058. </tr>
  2059. <tr>
  2060. <td>91.</td>
  2061. <td>
  2062. &nbsp;&nbsp;&nbsp;&nbsp;
  2063. <code>conf.setOutputKeyClass(Text.class);</code>
  2064. </td>
  2065. </tr>
  2066. <tr>
  2067. <td>92.</td>
  2068. <td>
  2069. &nbsp;&nbsp;&nbsp;&nbsp;
  2070. <code>conf.setOutputValueClass(IntWritable.class);</code>
  2071. </td>
  2072. </tr>
  2073. <tr>
  2074. <td>93.</td>
  2075. <td></td>
  2076. </tr>
  2077. <tr>
  2078. <td>94.</td>
  2079. <td>
  2080. &nbsp;&nbsp;&nbsp;&nbsp;
  2081. <code>conf.setMapperClass(Map.class);</code>
  2082. </td>
  2083. </tr>
  2084. <tr>
  2085. <td>95.</td>
  2086. <td>
  2087. &nbsp;&nbsp;&nbsp;&nbsp;
  2088. <code>conf.setCombinerClass(Reduce.class);</code>
  2089. </td>
  2090. </tr>
  2091. <tr>
  2092. <td>96.</td>
  2093. <td>
  2094. &nbsp;&nbsp;&nbsp;&nbsp;
  2095. <code>conf.setReducerClass(Reduce.class);</code>
  2096. </td>
  2097. </tr>
  2098. <tr>
  2099. <td>97.</td>
  2100. <td></td>
  2101. </tr>
  2102. <tr>
  2103. <td>98.</td>
  2104. <td>
  2105. &nbsp;&nbsp;&nbsp;&nbsp;
  2106. <code>conf.setInputFormat(TextInputFormat.class);</code>
  2107. </td>
  2108. </tr>
  2109. <tr>
  2110. <td>99.</td>
  2111. <td>
  2112. &nbsp;&nbsp;&nbsp;&nbsp;
  2113. <code>conf.setOutputFormat(TextOutputFormat.class);</code>
  2114. </td>
  2115. </tr>
  2116. <tr>
  2117. <td>100.</td>
  2118. <td></td>
  2119. </tr>
  2120. <tr>
  2121. <td>101.</td>
  2122. <td>
  2123. &nbsp;&nbsp;&nbsp;&nbsp;
  2124. <code>
  2125. List&lt;String&gt; other_args = new ArrayList&lt;String&gt;();
  2126. </code>
  2127. </td>
  2128. </tr>
  2129. <tr>
  2130. <td>102.</td>
  2131. <td>
  2132. &nbsp;&nbsp;&nbsp;&nbsp;
  2133. <code>for (int i=0; i &lt; args.length; ++i) {</code>
  2134. </td>
  2135. </tr>
  2136. <tr>
  2137. <td>103.</td>
  2138. <td>
  2139. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  2140. <code>if ("-skip".equals(args[i])) {</code>
  2141. </td>
  2142. </tr>
  2143. <tr>
  2144. <td>104.</td>
  2145. <td>
  2146. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  2147. <code>
  2148. DistributedCache.addCacheFile(new Path(args[++i]).toUri(), conf);
  2149. </code>
  2150. </td>
  2151. </tr>
  2152. <tr>
  2153. <td>105.</td>
  2154. <td>
  2155. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  2156. <code>
  2157. conf.setBoolean("wordcount.skip.patterns", true);
  2158. </code>
  2159. </td>
  2160. </tr>
  2161. <tr>
  2162. <td>106.</td>
  2163. <td>
  2164. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  2165. <code>} else {</code>
  2166. </td>
  2167. </tr>
  2168. <tr>
  2169. <td>107.</td>
  2170. <td>
  2171. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  2172. <code>other_args.add(args[i]);</code>
  2173. </td>
  2174. </tr>
  2175. <tr>
  2176. <td>108.</td>
  2177. <td>
  2178. &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
  2179. <code>}</code>
  2180. </td>
  2181. </tr>
  2182. <tr>
  2183. <td>109.</td>
  2184. <td>
  2185. &nbsp;&nbsp;&nbsp;&nbsp;
  2186. <code>}</code>
  2187. </td>
  2188. </tr>
  2189. <tr>
  2190. <td>110.</td>
  2191. <td></td>
  2192. </tr>
  2193. <tr>
  2194. <td>111.</td>
  2195. <td>
  2196. &nbsp;&nbsp;&nbsp;&nbsp;
  2197. <code>FileInputFormat.setInputPaths(conf, new Path(other_args.get(0)));</code>
  2198. </td>
  2199. </tr>
  2200. <tr>
  2201. <td>112.</td>
  2202. <td>
  2203. &nbsp;&nbsp;&nbsp;&nbsp;
  2204. <code>FileOutputFormat.setOutputPath(conf, new Path(other_args.get(1)));</code>
  2205. </td>
  2206. </tr>
  2207. <tr>
  2208. <td>113.</td>
  2209. <td></td>
  2210. </tr>
  2211. <tr>
  2212. <td>114.</td>
  2213. <td>
  2214. &nbsp;&nbsp;&nbsp;&nbsp;
  2215. <code>JobClient.runJob(conf);</code>
  2216. </td>
  2217. </tr>
  2218. <tr>
  2219. <td>115.</td>
  2220. <td>
  2221. &nbsp;&nbsp;&nbsp;&nbsp;
  2222. <code>return 0;</code>
  2223. </td>
  2224. </tr>
  2225. <tr>
  2226. <td>116.</td>
  2227. <td>
  2228. &nbsp;&nbsp;
  2229. <code>}</code>
  2230. </td>
  2231. </tr>
  2232. <tr>
  2233. <td>117.</td>
  2234. <td></td>
  2235. </tr>
  2236. <tr>
  2237. <td>118.</td>
  2238. <td>
  2239. &nbsp;&nbsp;
  2240. <code>
  2241. public static void main(String[] args) throws Exception {
  2242. </code>
  2243. </td>
  2244. </tr>
  2245. <tr>
  2246. <td>119.</td>
  2247. <td>
  2248. &nbsp;&nbsp;&nbsp;&nbsp;
  2249. <code>
  2250. int res = ToolRunner.run(new Configuration(), new WordCount(),
  2251. args);
  2252. </code>
  2253. </td>
  2254. </tr>
  2255. <tr>
  2256. <td>120.</td>
  2257. <td>
  2258. &nbsp;&nbsp;&nbsp;&nbsp;
  2259. <code>System.exit(res);</code>
  2260. </td>
  2261. </tr>
  2262. <tr>
  2263. <td>121.</td>
  2264. <td>
  2265. &nbsp;&nbsp;
  2266. <code>}</code>
  2267. </td>
  2268. </tr>
  2269. <tr>
  2270. <td>122.</td>
  2271. <td>
  2272. <code>}</code>
  2273. </td>
  2274. </tr>
  2275. <tr>
  2276. <td>123.</td>
  2277. <td></td>
  2278. </tr>
  2279. </table>
  2280. </section>
  2281. <section>
  2282. <title>Sample Runs</title>
  2283. <p>Sample text-files as input:</p>
  2284. <p>
  2285. <code>$ bin/hadoop dfs -ls /usr/joe/wordcount/input/</code><br/>
  2286. <code>/usr/joe/wordcount/input/file01</code><br/>
  2287. <code>/usr/joe/wordcount/input/file02</code><br/>
  2288. <br/>
  2289. <code>$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file01</code><br/>
  2290. <code>Hello World, Bye World!</code><br/>
  2291. <br/>
  2292. <code>$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file02</code><br/>
  2293. <code>Hello Hadoop, Goodbye to hadoop.</code>
  2294. </p>
  2295. <p>Run the application:</p>
  2296. <p>
  2297. <code>
  2298. $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount
  2299. /usr/joe/wordcount/input /usr/joe/wordcount/output
  2300. </code>
  2301. </p>
  2302. <p>Output:</p>
  2303. <p>
  2304. <code>
  2305. $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
  2306. </code>
  2307. <br/>
  2308. <code>Bye 1</code><br/>
  2309. <code>Goodbye 1</code><br/>
  2310. <code>Hadoop, 1</code><br/>
  2311. <code>Hello 2</code><br/>
  2312. <code>World! 1</code><br/>
  2313. <code>World, 1</code><br/>
  2314. <code>hadoop. 1</code><br/>
  2315. <code>to 1</code><br/>
  2316. </p>
<p>Notice how the inputs differ from the first version we looked at,
and how they affect the outputs.</p>
<p>Now, let's plug in a pattern-file which lists the word-patterns to be
  2320. ignored, via the <code>DistributedCache</code>.</p>
  2321. <p>
  2322. <code>$ hadoop dfs -cat /user/joe/wordcount/patterns.txt</code><br/>
  2323. <code>\.</code><br/>
  2324. <code>\,</code><br/>
  2325. <code>\!</code><br/>
  2326. <code>to</code><br/>
  2327. </p>
  2328. <p>Run it again, this time with more options:</p>
  2329. <p>
  2330. <code>
  2331. $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount
  2332. -Dwordcount.case.sensitive=true /usr/joe/wordcount/input
  2333. /usr/joe/wordcount/output -skip /user/joe/wordcount/patterns.txt
  2334. </code>
  2335. </p>
  2336. <p>As expected, the output:</p>
  2337. <p>
  2338. <code>
  2339. $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
  2340. </code>
  2341. <br/>
  2342. <code>Bye 1</code><br/>
  2343. <code>Goodbye 1</code><br/>
  2344. <code>Hadoop 1</code><br/>
  2345. <code>Hello 2</code><br/>
  2346. <code>World 2</code><br/>
  2347. <code>hadoop 1</code><br/>
  2348. </p>
<p>Run it once more, this time switching off case-sensitivity:</p>
  2350. <p>
  2351. <code>
  2352. $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount
  2353. -Dwordcount.case.sensitive=false /usr/joe/wordcount/input
  2354. /usr/joe/wordcount/output -skip /user/joe/wordcount/patterns.txt
  2355. </code>
  2356. </p>
  2357. <p>Sure enough, the output:</p>
  2358. <p>
  2359. <code>
  2360. $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
  2361. </code>
  2362. <br/>
  2363. <code>bye 1</code><br/>
  2364. <code>goodbye 1</code><br/>
  2365. <code>hadoop 2</code><br/>
  2366. <code>hello 2</code><br/>
  2367. <code>world 2</code><br/>
  2368. </p>
  2369. </section>
  2370. <section>
  2371. <title>Highlights</title>
  2372. <p>The second version of <code>WordCount</code> improves upon the
  2373. previous one by using some features offered by the Map-Reduce framework:
  2374. </p>
  2375. <ul>
  2376. <li>
  2377. Demonstrates how applications can access configuration parameters
  2378. in the <code>configure</code> method of the <code>Mapper</code> (and
  2379. <code>Reducer</code>) implementations (lines 28-43).
  2380. </li>
  2381. <li>
  2382. Demonstrates how the <code>DistributedCache</code> can be used to
  2383. distribute read-only data needed by the jobs. Here it allows the user
  2384. to specify word-patterns to skip while counting (line 104).
  2385. </li>
  2386. <li>
  2387. Demonstrates the utility of the <code>Tool</code> interface and the
  2388. <code>GenericOptionsParser</code> to handle generic Hadoop
  2389. command-line options (lines 87-116, 119).
  2390. </li>
  2391. <li>
  2392. Demonstrates how applications can use <code>Counters</code> (line 68)
  2393. and how they can set application-specific status information via
  2394. the <code>Reporter</code> instance passed to the <code>map</code> (and
  2395. <code>reduce</code>) method (line 72).
  2396. </li>
  2397. </ul>
  2398. </section>
  2399. </section>
  2400. <p>
  2401. <em>Java and JNI are trademarks or registered trademarks of
  2402. Sun Microsystems, Inc. in the United States and other countries.</em>
  2403. </p>
  2404. </body>
  2405. </document>