<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta content="Apache Forrest" name="Generator">
<meta name="Forrest-version" content="0.8">
<meta name="Forrest-skin-name" content="pelt">
<title>
Hadoop On Demand
</title>
<link type="text/css" href="skin/basic.css" rel="stylesheet">
<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
<link type="text/css" href="skin/profile.css" rel="stylesheet">
<link rel="shortcut icon" href="images/favicon.ico">
</head>
<body>
  183. <div id="content">
<h1>
Hadoop On Demand
</h1>
  191. <div id="minitoc-area">
  192. <ul class="minitoc">
  193. <li>
  194. <a href="#Overview">Overview</a>
  195. </li>
  196. <li>
  197. <a href="#Pre-requisites">Pre-requisites</a>
  198. </li>
  199. <li>
  200. <a href="#Resource+Manager">Resource Manager</a>
  201. </li>
  202. <li>
  203. <a href="#Installing+HOD">Installing HOD</a>
  204. </li>
  205. <li>
  206. <a href="#Configuring+HOD">Configuring HOD</a>
  207. <ul class="minitoc">
  208. <li>
  209. <a href="#Minimal+Configuration+to+get+started">Minimal Configuration to get started</a>
  210. </li>
  211. <li>
  212. <a href="#Advanced+Configuration">Advanced Configuration</a>
  213. </li>
  214. </ul>
  215. </li>
  216. <li>
  217. <a href="#Running+HOD">Running HOD</a>
  218. </li>
  219. <li>
  220. <a href="#Supporting+Tools+and+Utilities">Supporting Tools and Utilities</a>
  221. <ul class="minitoc">
  222. <li>
  223. <a href="#logcondense.py+-+Tool+for+removing+log+files+uploaded+to+DFS">logcondense.py - Tool for removing log files uploaded to DFS</a>
  224. <ul class="minitoc">
  225. <li>
  226. <a href="#Running+logcondense.py">Running logcondense.py</a>
  227. </li>
  228. <li>
  229. <a href="#Command+Line+Options+for+logcondense.py">Command Line Options for logcondense.py</a>
  230. </li>
  231. </ul>
  232. </li>
  233. <li>
  234. <a href="#checklimits.sh+-+Tool+to+update+torque+comment+field+reflecting+resource+limits">checklimits.sh - Tool to update torque comment field reflecting resource limits</a>
  235. <ul class="minitoc">
  236. <li>
  237. <a href="#Running+checklimits.sh">Running checklimits.sh</a>
  238. </li>
  239. </ul>
  240. </li>
  241. </ul>
  242. </li>
  243. </ul>
  244. </div>
  245. <a name="N1000C"></a><a name="Overview"></a>
  246. <h2 class="h3">Overview</h2>
  247. <div class="section">
  248. <p>The Hadoop On Demand (HOD) project is a system for provisioning and
  249. managing independent Hadoop MapReduce and HDFS instances on a shared cluster
  250. of nodes. HOD is a tool that makes it easy for administrators and users to
  251. quickly setup and use Hadoop. It is also a very useful tool for Hadoop developers
  252. and testers who need to share a physical cluster for testing their own Hadoop
  253. versions.
  254. </p>
  255. <p>HOD relies on a resource manager (RM) for allocation of nodes that it can use for
  256. running Hadoop instances. At present it runs with the <a href="http://www.clusterresources.com/pages/products/torque-resource-manager.php">Torque
  257. resource manager</a>.
  258. </p>
  259. <p>
  260. The basic system architecture of HOD includes components from:</p>
  261. <ul>
  262. <li>A Resource manager (possibly together with a scheduler),</li>
  263. <li>HOD components, and </li>
  264. <li>Hadoop Map/Reduce and HDFS daemons.</li>
  265. </ul>
  266. <p>
  267. HOD provisions and maintains Hadoop Map/Reduce and, optionally, HDFS instances
  268. through interaction with the above components on a given cluster of nodes. A cluster of
  269. nodes can be thought of as comprising of two sets of nodes:</p>
  270. <ul>
  271. <li>Submit nodes: Users use the HOD client on these nodes to allocate clusters, and then
  272. use the Hadoop client to submit Hadoop jobs. </li>
  273. <li>Compute nodes: Using the resource manager, HOD components are run on these nodes to
  274. provision the Hadoop daemons. After that Hadoop jobs run on them.</li>
  275. </ul>
  276. <p>
  277. Here is a brief description of the sequence of operations in allocating a cluster and
  278. running jobs on them.
  279. </p>
  280. <ul>
  281. <li>The user uses the HOD client on the Submit node to allocate a required number of
  282. cluster nodes, and provision Hadoop on them.</li>
  283. <li>The HOD client uses a Resource Manager interface, (qsub, in Torque), to submit a HOD
  284. process, called the RingMaster, as a Resource Manager job, requesting the user desired number
  285. of nodes. This job is submitted to the central server of the Resource Manager (pbs_server, in Torque).</li>
  286. <li>On the compute nodes, the resource manager slave daemons, (pbs_moms in Torque), accept
  287. and run jobs that they are given by the central server (pbs_server in Torque). The RingMaster
  288. process is started on one of the compute nodes (mother superior, in Torque).</li>
  289. <li>The Ringmaster then uses another Resource Manager interface, (pbsdsh, in Torque), to run
  290. the second HOD component, HodRing, as distributed tasks on each of the compute
  291. nodes allocated.</li>
  292. <li>The Hodrings, after initializing, communicate with the Ringmaster to get Hadoop commands,
  293. and run them accordingly. Once the Hadoop commands are started, they register with the RingMaster,
  294. giving information about the daemons.</li>
  295. <li>All the configuration files needed for Hadoop instances are generated by HOD itself,
  296. some obtained from options given by user in its own configuration file.</li>
  297. <li>The HOD client keeps communicating with the RingMaster to find out the location of the
  298. JobTracker and HDFS daemons.</li>
  299. </ul>
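<p>For example, a typical session on a submit node might look like the following. This is
only an illustrative sketch; the cluster directory <em>~/hod-clusters/test</em> and the
example jar are placeholders, and the exact options are described in the
<a href="hod_user_guide.html">HOD User Guide</a>:
<em>hod allocate -d ~/hod-clusters/test -n 4</em> to allocate a four-node cluster,
<em>hadoop --config ~/hod-clusters/test jar hadoop-examples.jar wordcount input output</em>
to run a job against it, and <em>hod deallocate -d ~/hod-clusters/test</em> to release the
nodes when done.</p>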
<p>The rest of this document describes the steps needed to set up HOD on a physical cluster of nodes.</p>
</div>
  302. <a name="N10056"></a><a name="Pre-requisites"></a>
  303. <h2 class="h3">Pre-requisites</h2>
  304. <div class="section">
  305. <p>Operating System: HOD is currently tested on RHEL4.<br>
  306. Nodes : HOD requires a minimum of 3 nodes configured through a resource manager.<br>
  307. </p>
  308. <p> Software </p>
  309. <p>The following components are to be installed on *ALL* the nodes before using HOD:</p>
  310. <ul>
  311. <li>Torque: Resource manager</li>
  312. <li>
  313. <a href="http://www.python.org">Python</a> : HOD requires version 2.5.1 of Python.</li>
  314. </ul>
  315. <p>The following components can be optionally installed for getting better
  316. functionality from HOD:</p>
  317. <ul>
  318. <li>
  319. <a href="http://twistedmatrix.com/trac/">Twisted Python</a>: This can be
  320. used for improving the scalability of HOD. If this module is detected to be
  321. installed, HOD uses it, else it falls back to default modules.</li>
  322. <li>
  323. <a href="http://hadoop.apache.org/core/">Hadoop</a>: HOD can automatically
  324. distribute Hadoop to all nodes in the cluster. However, it can also use a
  325. pre-installed version of Hadoop, if it is available on all nodes in the cluster.
  326. HOD currently supports Hadoop 0.15 and above.</li>
  327. </ul>
  328. <p>NOTE: HOD configuration requires the location of installs of these
  329. components to be the same on all nodes in the cluster. It will also
  330. make the configuration simpler to have the same location on the submit
  331. nodes.
  332. </p>
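<p>For example, assuming password-less ssh to the compute nodes, a quick way to check that
Python and Hadoop are present at the same location everywhere is the following sketch (the
host names and paths are only placeholders for your environment):
<em>for host in node01 node02 node03; do ssh $host "ls /usr/local/python-2.5.1/bin/python /usr/local/hadoop/bin/hadoop"; done</em>
</p>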
</div>
  334. <a name="N1008A"></a><a name="Resource+Manager"></a>
  335. <h2 class="h3">Resource Manager</h2>
  336. <div class="section">
  337. <p> Currently HOD works with the Torque resource manager, which it uses for its node
  338. allocation and job submission. Torque is an open source resource manager from
  339. <a href="http://www.clusterresources.com">Cluster Resources</a>, a community effort
  340. based on the PBS project. It provides control over batch jobs and distributed compute nodes. Torque is
  341. freely available for download from <a href="http://www.clusterresources.com/downloads/torque/">here</a>.
  342. </p>
  343. <p> All documentation related to torque can be seen under
  344. the section TORQUE Resource Manager <a href="http://www.clusterresources.com/pages/resources/documentation.php">here</a>. You can
  345. get wiki documentation from <a href="http://www.clusterresources.com/wiki/doku.php?id=torque:torque_wiki">here</a>.
  346. Users may wish to subscribe to TORQUE&rsquo;s mailing list or view the archive for questions,
  347. comments <a href="http://www.clusterresources.com/pages/resources/mailing-lists.php">here</a>.
  348. </p>
  349. <p>For using HOD with Torque:</p>
  350. <ul>
  351. <li>Install Torque components: pbs_server on one node(head node), pbs_mom on all
  352. compute nodes, and PBS client tools on all compute nodes and submit
  353. nodes. Perform atleast a basic configuration so that the Torque system is up and
  354. running i.e pbs_server knows which machines to talk to. Look <a href="http://www.clusterresources.com/wiki/doku.php?id=torque:1.2_basic_configuration">here</a>
  355. for basic configuration.
  356. For advanced configuration, see <a href="http://www.clusterresources.com/wiki/doku.php?id=torque:1.3_advanced_configuration">here</a>
  357. </li>
  358. <li>Create a queue for submitting jobs on the pbs_server. The name of the queue is the
  359. same as the HOD configuration parameter, resource-manager.queue. The Hod client uses this queue to
  360. submit the Ringmaster process as a Torque job.</li>
  361. <li>Specify a 'cluster name' as a 'property' for all nodes in the cluster.
  362. This can be done by using the 'qmgr' command. For example:
  363. qmgr -c "set node node properties=cluster-name". The name of the cluster is the same as
  364. the HOD configuration parameter, hod.cluster. </li>
  365. <li>Ensure that jobs can be submitted to the nodes. This can be done by
  366. using the 'qsub' command. For example:
  367. echo "sleep 30" | qsub -l nodes=3</li>
  368. </ul>
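<p>As an illustrative sketch only (the queue name, node name, and cluster property below are
placeholders, and the authoritative syntax is in the Torque documentation), the queue and
node-property setup might look like:
<em>qmgr -c "create queue hod-queue queue_type=execution"</em>,
<em>qmgr -c "set queue hod-queue enabled=true"</em>,
<em>qmgr -c "set queue hod-queue started=true"</em>,
<em>qmgr -c "set node compute-node-01 properties=mycluster"</em>, followed by a test
submission such as <em>echo "sleep 30" | qsub -l nodes=3 -q hod-queue</em>.</p>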
</div>
  370. <a name="N100C4"></a><a name="Installing+HOD"></a>
  371. <h2 class="h3">Installing HOD</h2>
  372. <div class="section">
  373. <p>Now that the resource manager set up is done, we proceed on to obtaining and
  374. installing HOD.</p>
  375. <ul>
  376. <li>If you are getting HOD from the Hadoop tarball,it is available under the
  377. 'contrib' section of Hadoop, under the root directory 'hod'.</li>
  378. <li>If you are building from source, you can run ant tar from the Hadoop root
  379. directory, to generate the Hadoop tarball, and then pick HOD from there,
  380. as described in the point above.</li>
  381. <li>Distribute the files under this directory to all the nodes in the
  382. cluster. Note that the location where the files are copied should be
  383. the same on all the nodes.</li>
  384. <li>Note that compiling hadoop would build HOD with appropriate permissions
  385. set on all the required script files in HOD.</li>
  386. </ul>
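<p>For example, the following sketch shows one way to build and distribute HOD. The paths,
host names, and Hadoop version are placeholders for your environment:
<em>cd /path/to/hadoop-0.19.0-src; ant tar</em> to build the release tarball, then unpack it
and copy the HOD directory to the same location on every node, for instance
<em>scp -r contrib/hod node01:/usr/local/hod</em>, repeated for each compute and submit node.</p>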
</div>
  388. <a name="N100DD"></a><a name="Configuring+HOD"></a>
  389. <h2 class="h3">Configuring HOD</h2>
  390. <div class="section">
  391. <p>After HOD installation is done, it has to be configured before we start using
  392. it.</p>
  393. <a name="N100E6"></a><a name="Minimal+Configuration+to+get+started"></a>
  394. <h3 class="h4">Minimal Configuration to get started</h3>
  395. <ul>
  396. <li>On the node from where you want to run hod, edit the file hodrc
  397. which can be found in the &lt;install dir&gt;/conf directory. This file
  398. contains the minimal set of values required for running hod.</li>
  399. <li>
  400. <p>Specify values suitable to your environment for the following
  401. variables defined in the configuration file. Note that some of these
  402. variables are defined at more than one place in the file.</p>
  403. <ul>
  404. <li>${JAVA_HOME}: Location of Java for Hadoop. Hadoop supports Sun JDK
  405. 1.5.x and above.</li>
  406. <li>${CLUSTER_NAME}: Name of the cluster which is specified in the
  407. 'node property' as mentioned in resource manager configuration.</li>
  408. <li>${HADOOP_HOME}: Location of Hadoop installation on the compute and
  409. submit nodes.</li>
  410. <li>${RM_QUEUE}: Queue configured for submiting jobs in the resource
  411. manager configuration.</li>
  412. <li>${RM_HOME}: Location of the resource manager installation on the
  413. compute and submit nodes.</li>
  414. </ul>
  415. </li>
  416. <li>
  417. <p>The following environment variables *may* need to be set depending on
  418. your environment. These variables must be defined where you run the
  419. HOD client, and also be specified in the HOD configuration file as the
  420. value of the key resource_manager.env-vars. Multiple variables can be
  421. specified as a comma separated list of key=value pairs.</p>
  422. <ul>
  423. <li>HOD_PYTHON_HOME: If you install python to a non-default location
  424. of the compute nodes, or submit nodes, then, this variable must be
  425. defined to point to the python executable in the non-standard
  426. location.</li>
  427. </ul>
  428. </li>
  429. </ul>
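<p>As an illustrative fragment only (the values are placeholders, and the authoritative
section and option names are in the <a href="hod_config_guide.html">Configuration Guide</a>),
the relevant lines in hodrc might read: <em>java-home = /usr/java/jdk1.6.0</em> and
<em>cluster = mycluster</em> in the <em>[hod]</em> section, and
<em>queue = hod-queue</em>, <em>batch-home = /usr/local/torque</em> and
<em>env-vars = HOD_PYTHON_HOME=/usr/local/python-2.5.1/bin/python</em> in the
<em>[resource_manager]</em> section.</p>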
  430. <a name="N10117"></a><a name="Advanced+Configuration"></a>
  431. <h3 class="h4">Advanced Configuration</h3>
  432. <p> You can review other configuration options in the file and modify them to suit
  433. your needs. Refer to the <a href="hod_config_guide.html">Configuration Guide</a> for information about the HOD
  434. configuration.
  435. </p>
  436. </div>
  437. <a name="N10126"></a><a name="Running+HOD"></a>
  438. <h2 class="h3">Running HOD</h2>
  439. <div class="section">
  440. <p>You can now proceed to <a href="hod_user_guide.html">HOD User Guide</a> for information about how to run HOD,
  441. what are the various features, options and for help in trouble-shooting.</p>
  442. </div>
  443. <a name="N10134"></a><a name="Supporting+Tools+and+Utilities"></a>
  444. <h2 class="h3">Supporting Tools and Utilities</h2>
  445. <div class="section">
  446. <p>This section describes certain supporting tools and utilities that can be used in managing HOD deployments.</p>
  447. <a name="N1013D"></a><a name="logcondense.py+-+Tool+for+removing+log+files+uploaded+to+DFS"></a>
  448. <h3 class="h4">logcondense.py - Tool for removing log files uploaded to DFS</h3>
  449. <p>As mentioned in
  450. <a href="hod_user_guide.html#Collecting+and+Viewing+Hadoop+Logs">this section</a> of the
  451. <a href="hod_user_guide.html">HOD User Guide</a>, HOD can be configured to upload
  452. Hadoop logs to a statically configured HDFS. Over time, the number of logs uploaded
  453. to DFS could increase. logcondense.py is a tool that helps administrators to clean-up
  454. the log files older than a certain number of days. </p>
  455. <a name="N1014E"></a><a name="Running+logcondense.py"></a>
  456. <h4>Running logcondense.py</h4>
  457. <p>logcondense.py is available under hod_install_location/support folder. You can either
  458. run it using python, for e.g. <em>python logcondense.py</em>, or give execute permissions
  459. to the file, and directly run it as <em>logcondense.py</em>. logcondense.py needs to be
  460. run by a user who has sufficient permissions to remove files from locations where log
  461. files are uploaded in the DFS, if permissions are enabled. For e.g. as mentioned in the
  462. <a href="hod_config_guide.html#3.7+hodring+options">configuration guide</a>, the logs could
  463. be configured to come under the user's home directory in HDFS. In that case, the user
  464. running logcondense.py should have super user privileges to remove the files from under
  465. all user home directories.</p>
  466. <a name="N10162"></a><a name="Command+Line+Options+for+logcondense.py"></a>
  467. <h4>Command Line Options for logcondense.py</h4>
  468. <p>The following command line options are supported for logcondense.py.</p>
  469. <table class="ForrestTable" cellspacing="1" cellpadding="4">
  470. <tr>
  471. <td colspan="1" rowspan="1">Short Option</td>
  472. <td colspan="1" rowspan="1">Long option</td>
  473. <td colspan="1" rowspan="1">Meaning</td>
  474. <td colspan="1" rowspan="1">Example</td>
  475. </tr>
  476. <tr>
  477. <td colspan="1" rowspan="1">-p</td>
  478. <td colspan="1" rowspan="1">--package</td>
  479. <td colspan="1" rowspan="1">Complete path to the hadoop script. The version of hadoop must be the same as the
  480. one running HDFS.</td>
  481. <td colspan="1" rowspan="1">/usr/bin/hadoop</td>
  482. </tr>
  483. <tr>
  484. <td colspan="1" rowspan="1">-d</td>
  485. <td colspan="1" rowspan="1">--days</td>
  486. <td colspan="1" rowspan="1">Delete log files older than the specified number of days</td>
  487. <td colspan="1" rowspan="1">7</td>
  488. </tr>
  489. <tr>
  490. <td colspan="1" rowspan="1">-c</td>
  491. <td colspan="1" rowspan="1">--config</td>
  492. <td colspan="1" rowspan="1">Path to the Hadoop configuration directory, under which hadoop-site.xml resides.
  493. The hadoop-site.xml must point to the HDFS NameNode from where logs are to be removed.</td>
  494. <td colspan="1" rowspan="1">/home/foo/hadoop/conf</td>
  495. </tr>
  496. <tr>
  497. <td colspan="1" rowspan="1">-l</td>
  498. <td colspan="1" rowspan="1">--logs</td>
  499. <td colspan="1" rowspan="1">A HDFS path, this must be the same HDFS path as specified for the log-destination-uri,
  500. as mentioned in the <a href="hod_config_guide.html#3.7+hodring+options">configuration guide</a>,
  501. without the hdfs:// URI string</td>
  502. <td colspan="1" rowspan="1">/user</td>
  503. </tr>
  504. <tr>
  505. <td colspan="1" rowspan="1">-n</td>
  506. <td colspan="1" rowspan="1">--dynamicdfs</td>
  507. <td colspan="1" rowspan="1">If true, this will indicate that the logcondense.py script should delete HDFS logs
  508. in addition to Map/Reduce logs. Otherwise, it only deletes Map/Reduce logs, which is also the
  509. default if this option is not specified. This option is useful if dynamic DFS installations
  510. are being provisioned by HOD, and the static DFS installation is being used only to collect
  511. logs - a scenario that may be common in test clusters.</td>
  512. <td colspan="1" rowspan="1">false</td>
  513. </tr>
  514. </table>
  515. <p>So, for example, to delete all log files older than 7 days using a hadoop-site.xml stored in
  516. ~/hadoop-conf, using the hadoop installation under ~/hadoop-0.17.0, you could say:</p>
  517. <p>
  518. <em>python logcondense.py -p ~/hadoop-0.17.0/bin/hadoop -d 7 -c ~/hadoop-conf -l /user</em>
  519. </p>
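<p>Since logs accumulate over time, one way to automate the cleanup is to run logcondense.py
periodically from cron. As an illustrative sketch only (the schedule, user, and paths are
placeholders), a daily crontab entry for a suitably privileged user might look like:
<em>0 2 * * * python /usr/local/hod/support/logcondense.py -p /usr/bin/hadoop -d 7 -c /home/foo/hadoop/conf -l /user</em>.</p>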
  520. <a name="N10205"></a><a name="checklimits.sh+-+Tool+to+update+torque+comment+field+reflecting+resource+limits"></a>
  521. <h3 class="h4">checklimits.sh - Tool to update torque comment field reflecting resource limits</h3>
  522. <p>checklimits is a HOD tool specific to torque/maui environment. It
  523. updates torque comment field when newly submitted job(s) violate/cross
  524. over user limits set up in maui scheduler. It uses qstat, does one pass
  525. over torque job list to find out queued or unfinished jobs, runs maui
  526. tool checkjob on each job to see if user limits are violated and then
  527. runs torque's qalter utility to update job attribute 'comment'. Currently
  528. it updates the comment as <em>User-limits exceeded. Requested:([0-9]*)
  529. Used:([0-9]*) MaxLimit:([0-9]*)</em> for those jobs that violate limits.
  530. This comment field is then used by HOD to behave accordingly depending on
  531. the type of violation.</p>
  532. <a name="N10211"></a><a name="Running+checklimits.sh"></a>
  533. <h4>Running checklimits.sh</h4>
  534. <p>checklimits.sh is available under hod_install_location/support
  535. folder. This is a shell script and can be run directly as <em>sh
  536. checklimits.sh </em>or as <em>./checklimits.sh</em> after enabling
  537. execute permissions. In order for this tool to be able to update
  538. comment field of jobs from different users, it has to be run with
  539. torque administrative privileges. This tool has to be run repeatedly
  540. after specific intervals of time to frequently update jobs violating
  541. constraints, for e.g. via cron. Please note that the resource manager
  542. and scheduler commands used in this script can be expensive and so
  543. it is better not to run this inside a tight loop without sleeping.</p>
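<p>For example, an illustrative crontab entry for a user with Torque administrative
privileges, running the tool every 15 minutes (the interval and install path are
placeholders), might be:
<em>*/15 * * * * sh /usr/local/hod/support/checklimits.sh</em>.</p>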
</div>
</div>
</body>
</html>