12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721 |
- <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
- <html>
- <head>
- <META http-equiv="Content-Type" content="text/html; charset=UTF-8">
- <meta content="Apache Forrest" name="Generator">
- <meta name="Forrest-version" content="0.8">
- <meta name="Forrest-skin-name" content="pelt">
- <title>
- HOD Administrator Guide
- </title>
- <link type="text/css" href="skin/basic.css" rel="stylesheet">
- <link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
- <link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
- <link type="text/css" href="skin/profile.css" rel="stylesheet">
- <script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
- <link rel="shortcut icon" href="images/favicon.ico">
- </head>
- <body onload="init()">
- <script type="text/javascript">ndeSetTextSize();</script>
- <div id="top">
- <!--+
- |breadtrail
- +-->
- <div class="breadtrail">
- <a href="http://www.apache.org/">Apache</a> > <a href="http://hadoop.apache.org/">Hadoop</a> > <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
- </div>
- <!--+
- |header
- +-->
- <div class="header">
- <!--+
- |start group logo
- +-->
- <div class="grouplogo">
- <a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a>
- </div>
- <!--+
- |end group logo
- +-->
- <!--+
- |start Project Logo
- +-->
- <div class="projectlogo">
- <a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a>
- </div>
- <!--+
- |end Project Logo
- +-->
- <!--+
- |start Search
- +-->
- <div class="searchbox">
- <form action="http://www.google.com/search" method="get" class="roundtopsmall">
- <input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">
- <input name="Search" value="Search" type="submit">
- </form>
- </div>
- <!--+
- |end search
- +-->
- <!--+
- |start Tabs
- +-->
- <ul id="tabs">
- <li>
- <a class="unselected" href="http://hadoop.apache.org/core/">Project</a>
- </li>
- <li>
- <a class="unselected" href="http://wiki.apache.org/hadoop">Wiki</a>
- </li>
- <li class="current">
- <a class="selected" href="index.html">Hadoop 0.19 Documentation</a>
- </li>
- </ul>
- <!--+
- |end Tabs
- +-->
- </div>
- </div>
- <div id="main">
- <div id="publishedStrip">
- <!--+
- |start Subtabs
- +-->
- <div id="level2tabs"></div>
- <!--+
- |end Endtabs
- +-->
- <script type="text/javascript"><!--
- document.write("Last Published: " + document.lastModified);
- // --></script>
- </div>
- <!--+
- |breadtrail
- +-->
- <div class="breadtrail">
-
- </div>
- <!--+
- |start Menu, mainarea
- +-->
- <!--+
- |start Menu
- +-->
- <div id="menu">
- <div onclick="SwitchMenu('menu_selected_1.1', 'skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">Documentation</div>
- <div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;">
- <div class="menuitem">
- <a href="index.html">Overview</a>
- </div>
- <div class="menuitem">
- <a href="quickstart.html">Hadoop Quick Start</a>
- </div>
- <div class="menuitem">
- <a href="cluster_setup.html">Hadoop Cluster Setup</a>
- </div>
- <div class="menuitem">
- <a href="mapred_tutorial.html">Hadoop Map/Reduce Tutorial</a>
- </div>
- <div class="menuitem">
- <a href="commands_manual.html">Hadoop Command Guide</a>
- </div>
- <div class="menuitem">
- <a href="hdfs_shell.html">Hadoop FS Shell Guide</a>
- </div>
- <div class="menuitem">
- <a href="distcp.html">Hadoop DistCp Guide</a>
- </div>
- <div class="menuitem">
- <a href="native_libraries.html">Hadoop Native Libraries</a>
- </div>
- <div class="menuitem">
- <a href="streaming.html">Hadoop Streaming</a>
- </div>
- <div class="menuitem">
- <a href="hadoop_archives.html">Hadoop Archives</a>
- </div>
- <div class="menuitem">
- <a href="hdfs_user_guide.html">HDFS User Guide</a>
- </div>
- <div class="menuitem">
- <a href="hdfs_design.html">HDFS Architecture</a>
- </div>
- <div class="menuitem">
- <a href="hdfs_permissions_guide.html">HDFS Admin Guide: Permissions</a>
- </div>
- <div class="menuitem">
- <a href="hdfs_quota_admin_guide.html">HDFS Admin Guide: Quotas</a>
- </div>
- <div class="menuitem">
- <a href="SLG_user_guide.html">HDFS Utilities</a>
- </div>
- <div class="menuitem">
- <a href="hod_user_guide.html">HOD User Guide</a>
- </div>
- <div class="menupage">
- <div class="menupagetitle">HOD Admin Guide</div>
- </div>
- <div class="menuitem">
- <a href="hod_config_guide.html">HOD Config Guide</a>
- </div>
- <div class="menuitem">
- <a href="capacity_scheduler.html">Capacity Scheduler</a>
- </div>
- <div class="menuitem">
- <a href="api/index.html">API Docs</a>
- </div>
- <div class="menuitem">
- <a href="jdiff/changes.html">API Changes</a>
- </div>
- <div class="menuitem">
- <a href="http://wiki.apache.org/hadoop/">Wiki</a>
- </div>
- <div class="menuitem">
- <a href="http://wiki.apache.org/hadoop/FAQ">FAQ</a>
- </div>
- <div class="menuitem">
- <a href="releasenotes.html">Release Notes</a>
- </div>
- <div class="menuitem">
- <a href="changes.html">Change Log</a>
- </div>
- </div>
- <div id="credit"></div>
- <div id="roundbottom">
- <img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
- <!--+
- |alternative credits
- +-->
- <div id="credit2"></div>
- </div>
- <!--+
- |end Menu
- +-->
- <!--+
- |start content
- +-->
- <div id="content">
- <div title="Portable Document Format" class="pdflink">
- <a class="dida" href="hod_admin_guide.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
- PDF</a>
- </div>
- <h1>
- HOD Administrator Guide
- </h1>
- <div id="minitoc-area">
- <ul class="minitoc">
- <li>
- <a href="#Overview">Overview</a>
- </li>
- <li>
- <a href="#Pre-requisites">Pre-requisites</a>
- </li>
- <li>
- <a href="#Resource+Manager">Resource Manager</a>
- </li>
- <li>
- <a href="#Installing+HOD">Installing HOD</a>
- </li>
- <li>
- <a href="#Configuring+HOD">Configuring HOD</a>
- <ul class="minitoc">
- <li>
- <a href="#Minimal+Configuration">Minimal Configuration</a>
- </li>
- <li>
- <a href="#Advanced+Configuration">Advanced Configuration</a>
- </li>
- </ul>
- </li>
- <li>
- <a href="#Running+HOD">Running HOD</a>
- </li>
- <li>
- <a href="#Supporting+Tools+and+Utilities">Supporting Tools and Utilities</a>
- <ul class="minitoc">
- <li>
- <a href="#logcondense.py+-+Manage+Log+Files">logcondense.py - Manage Log Files</a>
- <ul class="minitoc">
- <li>
- <a href="#Running+logcondense.py">Running logcondense.py</a>
- </li>
- <li>
- <a href="#Command+Line+Options+for+logcondense.py">Command Line Options for logcondense.py</a>
- </li>
- </ul>
- </li>
- <li>
- <a href="#checklimits.sh+-+Monitor+Resource+Limits">checklimits.sh - Monitor Resource Limits</a>
- <ul class="minitoc">
- <li>
- <a href="#Running+checklimits.sh">Running checklimits.sh</a>
- </li>
- </ul>
- </li>
- <li>
- <a href="#verify-account+-+Script+to+verify+an+account+under+which+%0A+++++++++++++jobs+are+submitted">verify-account - Script to verify an account under which
- jobs are submitted</a>
- <ul class="minitoc">
- <li>
- <a href="#Integrating+the+verify-account+script+with+HOD">Integrating the verify-account script with HOD</a>
- </li>
- </ul>
- </li>
- </ul>
- </li>
- </ul>
- </div>
- <a name="N1000C"></a><a name="Overview"></a>
- <h2 class="h3">Overview</h2>
- <div class="section">
- <p>Hadoop On Demand (HOD) is a system for provisioning and
- managing independent Hadoop Map/Reduce and Hadoop Distributed File System (HDFS)
- instances on a shared cluster
- of nodes. HOD is a tool that makes it easy for administrators and users to
- quickly setup and use Hadoop. HOD is also a very useful tool for Hadoop developers
- and testers who need to share a physical cluster for testing their own Hadoop
- versions.
- </p>
- <p>HOD relies on a resource manager (RM) for allocation of nodes that it can use for
- running Hadoop instances. At present it runs with the <a href="http://www.clusterresources.com/pages/products/torque-resource-manager.php">Torque
- resource manager</a>.
- </p>
- <p>
- The basic system architecture of HOD includes these components:</p>
- <ul>
-
- <li>A Resource manager (possibly together with a scheduler)</li>
-
- <li>Various HOD components</li>
-
- <li>Hadoop Map/Reduce and HDFS daemons</li>
- </ul>
- <p>
- HOD provisions and maintains Hadoop Map/Reduce and, optionally, HDFS instances
- through interaction with the above components on a given cluster of nodes. A cluster of
- nodes can be thought of as comprising two sets of nodes:</p>
- <ul>
-
- <li>Submit nodes: Users use the HOD client on these nodes to allocate clusters, and then
- use the Hadoop client to submit Hadoop jobs. </li>
-
- <li>Compute nodes: Using the resource manager, HOD components are run on these nodes to
- provision the Hadoop daemons. After that Hadoop jobs run on them.</li>
- </ul>
- <p>
- Here is a brief description of the sequence of operations in allocating a cluster and
- running jobs on them.
- </p>
- <ul>
-
- <li>The user uses the HOD client on the Submit node to allocate a desired number of
- cluster nodes and to provision Hadoop on them.</li>
-
- <li>The HOD client uses a resource manager interface (qsub, in Torque) to submit a HOD
- process, called the RingMaster, as a Resource Manager job, to request the user's desired number
- of nodes. This job is submitted to the central server of the resource manager (pbs_server, in Torque).</li>
-
- <li>On the compute nodes, the resource manager slave daemons (pbs_moms in Torque) accept
- and run jobs that they are assigned by the central server (pbs_server in Torque). The RingMaster
- process is started on one of the compute nodes (mother superior, in Torque).</li>
-
- <li>The RingMaster then uses another resource manager interface (pbsdsh, in Torque) to run
- the second HOD component, HodRing, as distributed tasks on each of the compute
- nodes allocated.</li>
-
- <li>The HodRings, after initializing, communicate with the RingMaster to get Hadoop commands,
- and run them accordingly. Once the Hadoop commands are started, they register with the RingMaster,
- giving information about the daemons.</li>
-
- <li>All the configuration files needed for Hadoop instances are generated by HOD itself,
- some obtained from options given by user in its own configuration file.</li>
-
- <li>The HOD client keeps communicating with the RingMaster to find out the location of the
- JobTracker and HDFS daemons.</li>
- </ul>
- <p>This guide shows you how to get started using HOD, reviews various HOD features and command line options, and provides detailed troubleshooting help.</p>
- </div>
- <a name="N10056"></a><a name="Pre-requisites"></a>
- <h2 class="h3">Pre-requisites</h2>
- <div class="section">
- <p>To use HOD, your system should include the following hardware and software
- components.</p>
- <p>Operating System: HOD is currently tested on RHEL4.<br>
- Nodes: HOD requires a minimum of three nodes configured through a resource manager.<br>
- </p>
- <p> Software </p>
- <p>The following components must be installed on ALL nodes before using HOD:</p>
- <ul>
-
- <li>
- <a href="http://www.clusterresources.com/pages/products/torque-resource-manager.php">Torque: Resource manager</a>
- </li>
-
- <li>
- <a href="http://www.python.org">Python</a> : HOD requires version 2.5.1 of Python.</li>
- </ul>
- <p>The following components are optional and can be installed to obtain better
- functionality from HOD:</p>
- <ul>
-
- <li>
- <a href="http://twistedmatrix.com/trac/">Twisted Python</a>: This can be
- used for improving the scalability of HOD. If this module is detected to be
- installed, HOD uses it, else it falls back to default modules.</li>
-
- <li>
- <a href="http://hadoop.apache.org/core/">Hadoop</a>: HOD can automatically
- distribute Hadoop to all nodes in the cluster. However, it can also use a
- pre-installed version of Hadoop, if it is available on all nodes in the cluster.
- HOD currently supports Hadoop 0.15 and above.</li>
- </ul>
- <p>NOTE: HOD configuration requires the location of installs of these
- components to be the same on all nodes in the cluster. It will also
- make the configuration simpler to have the same location on the submit
- nodes.
- </p>
- </div>
- <a name="N1008F"></a><a name="Resource+Manager"></a>
- <h2 class="h3">Resource Manager</h2>
- <div class="section">
- <p> Currently HOD works with the Torque resource manager, which it uses for its node
- allocation and job submission. Torque is an open source resource manager from
- <a href="http://www.clusterresources.com">Cluster Resources</a>, a community effort
- based on the PBS project. It provides control over batch jobs and distributed compute nodes. Torque is
- freely available for download from <a href="http://www.clusterresources.com/downloads/torque/">here</a>.
- </p>
- <p> All documentation related to torque can be seen under
- the section TORQUE Resource Manager <a href="http://www.clusterresources.com/pages/resources/documentation.php">here</a>. You can
- get wiki documentation from <a href="http://www.clusterresources.com/wiki/doku.php?id=torque:torque_wiki">here</a>.
- Users may wish to subscribe to TORQUE’s mailing list or view the archive for questions,
- comments <a href="http://www.clusterresources.com/pages/resources/mailing-lists.php">here</a>.
- </p>
- <p>To use HOD with Torque:</p>
- <ul>
-
- <li>Install Torque components: pbs_server on one node (head node), pbs_mom on all
- compute nodes, and PBS client tools on all compute nodes and submit
- nodes. Perform at least a basic configuration so that the Torque system is up and
- running, that is, pbs_server knows which machines to talk to. Look <a href="http://www.clusterresources.com/wiki/doku.php?id=torque:1.2_basic_configuration">here</a>
- for basic configuration.
- For advanced configuration, see <a href="http://www.clusterresources.com/wiki/doku.php?id=torque:1.3_advanced_configuration">here</a>
- </li>
-
- <li>Create a queue for submitting jobs on the pbs_server. The name of the queue is the
- same as the HOD configuration parameter, resource-manager.queue. The HOD client uses this queue to
- submit the RingMaster process as a Torque job.</li>
-
- <li>Specify a cluster name as a property for all nodes in the cluster.
- This can be done by using the qmgr command. For example:
- <span class="codefrag">qmgr -c "set node node properties=cluster-name"</span>. The name of the cluster is the same as
- the HOD configuration parameter, hod.cluster. </li>
-
- <li>Make sure that jobs can be submitted to the nodes. This can be done by
- using the qsub command. For example:
- <span class="codefrag">echo "sleep 30" | qsub -l nodes=3</span>
- </li>
- </ul>
- </div>
- <a name="N100CE"></a><a name="Installing+HOD"></a>
- <h2 class="h3">Installing HOD</h2>
- <div class="section">
- <p>Once the resource manager is set up, you can obtain and
- install HOD.</p>
- <ul>
-
- <li>If you are getting HOD from the Hadoop tarball, it is available under the
- 'contrib' section of Hadoop, under the root directory 'hod'.</li>
-
- <li>If you are building from source, you can run ant tar from the Hadoop root
- directory to generate the Hadoop tarball, and then get HOD from there,
- as described above.</li>
-
- <li>Distribute the files under this directory to all the nodes in the
- cluster. Note that the location where the files are copied should be
- the same on all the nodes.</li>
-
- <li>Note that compiling hadoop would build HOD with appropriate permissions
- set on all the required script files in HOD.</li>
- </ul>
- </div>
- <a name="N100E7"></a><a name="Configuring+HOD"></a>
- <h2 class="h3">Configuring HOD</h2>
- <div class="section">
- <p>You can configure HOD once it is installed. The minimal configuration needed
- to run HOD is described below. More advanced configuration options are discussed
- in the HOD Configuration Guide.</p>
- <a name="N100F0"></a><a name="Minimal+Configuration"></a>
- <h3 class="h4">Minimal Configuration</h3>
- <p>To get started using HOD, the following minimal configuration is
- required:</p>
- <ul>
-
- <li>On the node from where you want to run HOD, edit the file hodrc
- located in the &lt;install dir&gt;/conf directory. This file
- contains the minimal set of values required to run hod.</li>
-
- <li>
- <p>Specify values suitable to your environment for the following
- variables defined in the configuration file. Note that some of these
- variables are defined at more than one place in the file.</p>
-
- <ul>
-
- <li>${JAVA_HOME}: Location of Java for Hadoop. Hadoop supports Sun JDK
- 1.6.x and above.</li>
-
- <li>${CLUSTER_NAME}: Name of the cluster which is specified in the
- 'node property' as mentioned in resource manager configuration.</li>
-
- <li>${HADOOP_HOME}: Location of Hadoop installation on the compute and
- submit nodes.</li>
-
- <li>${RM_QUEUE}: Queue configured for submitting jobs in the resource
- manager configuration.</li>
-
- <li>${RM_HOME}: Location of the resource manager installation on the
- compute and submit nodes.</li>
-
- </ul>
- </li>
- <li>
- <p>The following environment variables may need to be set depending on
- your environment. These variables must be defined where you run the
- HOD client and must also be specified in the HOD configuration file as the
- value of the key resource_manager.env-vars. Multiple variables can be
- specified as a comma separated list of key=value pairs.</p>
-
- <ul>
-
- <li>HOD_PYTHON_HOME: If you install python to a non-default location
- of the compute nodes, or submit nodes, then this variable must be
- defined to point to the python executable in the non-standard
- location.</li>
-
- </ul>
- </li>
- </ul>
- <a name="N10124"></a><a name="Advanced+Configuration"></a>
- <h3 class="h4">Advanced Configuration</h3>
- <p> You can review and modify other configuration options to suit
- your specific needs. Refer to the <a href="hod_config_guide.html">HOD Configuration
- Guide</a> for more information.</p>
- </div>
-
- <a name="N10133"></a><a name="Running+HOD"></a>
- <h2 class="h3">Running HOD</h2>
- <div class="section">
- <p>You can run HOD once it is configured. Refer to the <a href="hod_user_guide.html">HOD User Guide</a> for more information.</p>
- </div>
-
- <a name="N10141"></a><a name="Supporting+Tools+and+Utilities"></a>
- <h2 class="h3">Supporting Tools and Utilities</h2>
- <div class="section">
- <p>This section describes supporting tools and utilities that can be used to
- manage HOD deployments.</p>
- <a name="N1014A"></a><a name="logcondense.py+-+Manage+Log+Files"></a>
- <h3 class="h4">logcondense.py - Manage Log Files</h3>
- <p>As mentioned in the
- <a href="hod_user_guide.html#Collecting+and+Viewing+Hadoop+Logs">HOD User Guide</a>,
- HOD can be configured to upload
- Hadoop logs to a statically configured HDFS. Over time, the number of logs uploaded
- to HDFS could increase. logcondense.py is a tool that helps
- administrators to remove log files uploaded to HDFS. </p>
- <a name="N10157"></a><a name="Running+logcondense.py"></a>
- <h4>Running logcondense.py</h4>
- <p>logcondense.py is available under hod_install_location/support folder. You can either
- run it using python, for example, <em>python logcondense.py</em>, or give execute permissions
- to the file, and directly run it as <em>logcondense.py</em>. logcondense.py needs to be
- run by a user who has sufficient permissions to remove files from locations where log
- files are uploaded in the HDFS, if permissions are enabled. For example as mentioned in the
- <a href="hod_config_guide.html#3.7+hodring+options">HOD Configuration Guide</a>, the logs could
- be configured to come under the user's home directory in HDFS. In that case, the user
- running logcondense.py should have super user privileges to remove the files from under
- all user home directories.</p>
- <a name="N1016B"></a><a name="Command+Line+Options+for+logcondense.py"></a>
- <h4>Command Line Options for logcondense.py</h4>
- <p>The following command line options are supported for logcondense.py.</p>
- <table class="ForrestTable" cellspacing="1" cellpadding="4">
-
- <tr>
-
- <td colspan="1" rowspan="1">Short Option</td>
- <td colspan="1" rowspan="1">Long option</td>
- <td colspan="1" rowspan="1">Meaning</td>
- <td colspan="1" rowspan="1">Example</td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">-p</td>
- <td colspan="1" rowspan="1">--package</td>
- <td colspan="1" rowspan="1">Complete path to the hadoop script. The version of hadoop must be the same as the
- one running HDFS.</td>
- <td colspan="1" rowspan="1">/usr/bin/hadoop</td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">-d</td>
- <td colspan="1" rowspan="1">--days</td>
- <td colspan="1" rowspan="1">Delete log files older than the specified number of days</td>
- <td colspan="1" rowspan="1">7</td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">-c</td>
- <td colspan="1" rowspan="1">--config</td>
- <td colspan="1" rowspan="1">Path to the Hadoop configuration directory, under which hadoop-site.xml resides.
- The hadoop-site.xml must point to the HDFS NameNode from where logs are to be removed.</td>
- <td colspan="1" rowspan="1">/home/foo/hadoop/conf</td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">-l</td>
- <td colspan="1" rowspan="1">--logs</td>
- <td colspan="1" rowspan="1">A HDFS path, this must be the same HDFS path as specified for the log-destination-uri,
- as mentioned in the <a href="hod_config_guide.html#3.7+hodring+options">HOD Configuration Guide</a>,
- without the hdfs:// URI string</td>
- <td colspan="1" rowspan="1">/user</td>
-
- </tr>
-
- <tr>
-
- <td colspan="1" rowspan="1">-n</td>
- <td colspan="1" rowspan="1">--dynamicdfs</td>
- <td colspan="1" rowspan="1">If true, this will indicate that the logcondense.py script should delete HDFS logs
- in addition to Map/Reduce logs. Otherwise, it only deletes Map/Reduce logs, which is also the
- default if this option is not specified. This option is useful if
- dynamic HDFS installations
- are being provisioned by HOD, and the static HDFS installation is being used only to collect
- logs - a scenario that may be common in test clusters.</td>
- <td colspan="1" rowspan="1">false</td>
-
- </tr>
-
- </table>
- <p>So, for example, to delete all log files older than 7 days using a hadoop-site.xml stored in
- ~/hadoop-conf, using the hadoop installation under ~/hadoop-0.17.0, you could say:</p>
- <p>
- <em>python logcondense.py -p ~/hadoop-0.17.0/bin/hadoop -d 7 -c ~/hadoop-conf -l /user</em>
- </p>
- <a name="N1020E"></a><a name="checklimits.sh+-+Monitor+Resource+Limits"></a>
- <h3 class="h4">checklimits.sh - Monitor Resource Limits</h3>
- <p>checklimits.sh is a HOD tool specific to the Torque/Maui environment
- (<a href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php">Maui Cluster Scheduler</a> is an open source job
- scheduler for clusters and supercomputers, from clusterresources). The
- checklimits.sh script
- updates the torque comment field when newly submitted job(s) violate or
- exceed user limits set up in the Maui scheduler. It uses qstat, does one pass
- over the torque job-list to determine queued or unfinished jobs, runs Maui
- tool checkjob on each job to see if user limits are violated and then
- runs torque's qalter utility to update job attribute 'comment'. Currently
- it updates the comment as <em>User-limits exceeded. Requested:([0-9]*)
- Used:([0-9]*) MaxLimit:([0-9]*)</em> for those jobs that violate limits.
- This comment field is then used by HOD to behave accordingly depending on
- the type of violation.</p>
- <a name="N1021E"></a><a name="Running+checklimits.sh"></a>
- <h4>Running checklimits.sh</h4>
- <p>checklimits.sh is available under the hod_install_location/support
- folder. This shell script can be run directly as <em>sh
- checklimits.sh </em>or as <em>./checklimits.sh</em> after enabling
- execute permissions. Torque and Maui binaries should be available
- on the machine where the tool is run and should be in the path
- of the shell script process. To update the
- comment field of jobs from different users, this tool must be run with
- torque administrative privileges. This tool must be run repeatedly
- after specific intervals of time to frequently update jobs violating
- constraints, for example via cron. Please note that the resource manager
- and scheduler commands used in this script can be expensive and so
- it is better not to run this inside a tight loop without sleeping.</p>
- <a name="N1022F"></a><a name="verify-account+-+Script+to+verify+an+account+under+which+%0A+++++++++++++jobs+are+submitted"></a>
- <h3 class="h4">verify-account - Script to verify an account under which
- jobs are submitted</h3>
- <p>Production systems use accounting packages to charge users for using
- shared compute resources. HOD supports a parameter
- <em>resource_manager.pbs-account</em> to allow users to identify the
- account under which they would like to submit jobs. It may be necessary
- to verify that this account is a valid one configured in an accounting
- system. The <em>hod-install-dir/bin/verify-account</em> script
- provides a mechanism to plug-in a custom script that can do this
- verification.</p>
- <a name="N1023E"></a><a name="Integrating+the+verify-account+script+with+HOD"></a>
- <h4>Integrating the verify-account script with HOD</h4>
- <p>HOD runs the <em>verify-account</em> script passing in the
- <em>resource_manager.pbs-account</em> value as argument to the script,
- before allocating a cluster. Sites can write a script that verifies this
- account against their accounting systems. Returning a non-zero exit
- code from this script will cause HOD to fail allocation. Also, in
- case of an error, HOD will print the output of script to the user.
- Any descriptive error message can be passed to the user from the
- script in this manner.</p>
- <p>The default script that comes with the HOD installation does not
- do any validation, and returns a zero exit code.</p>
- <p>If the verify-account script is not found, then HOD will treat
- verification as disabled, and continue allocation as is.</p>
- </div>
- </div>
- <!--+
- |end content
- +-->
- <div class="clearboth"> </div>
- </div>
- <div id="footer">
- <!--+
- |start bottomstrip
- +-->
- <div class="lastmodified">
- <script type="text/javascript"><!--
- document.write("Last Published: " + document.lastModified);
- // --></script>
- </div>
- <div class="copyright">
- Copyright ©
- 2008 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
- </div>
- <!--+
- |end bottomstrip
- +-->
- </div>
- </body>
- </html>
|