
HADOOP-4687. Fix some of the remaining javadoc warnings.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/HADOOP-4687/core@780833 13f79535-47bb-0310-9956-ffa450edef68
Owen O'Malley 16 years ago
parent
commit
5c7b7adacb

+ 4 - 6
src/java/org/apache/hadoop/filecache/DistributedCache.java

@@ -34,8 +34,8 @@ import java.net.URI;
  * framework to cache files (text, archives, jars etc.) needed by applications.
  * </p>
  * 
- * <p>Applications specify the files, via urls (hdfs:// or http://) to be cached 
- * via the {@link org.apache.hadoop.mapred.JobConf}.
+ * <p>Applications specify the files, via urls (hdfs:// or http://) to be 
+ * cached via the org.apache.hadoop.mapred.JobConf.
  * The <code>DistributedCache</code> assumes that the
  * files specified via hdfs:// urls are already present on the 
  * {@link FileSystem} at the path specified by the url.</p>
@@ -82,8 +82,8 @@ import java.net.URI;
  *     DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz", job);
  *     DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz", job);
  *     
- *     3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper}
- *     or {@link org.apache.hadoop.mapred.Reducer}:
+ *     3. Use the cached files in the org.apache.hadoop.mapred.Mapper
+ *     or org.apache.hadoop.mapred.Reducer:
  *     
  *     public static class MapClass extends MapReduceBase  
  *     implements Mapper&lt;K, V, K, V&gt; {
@@ -109,8 +109,6 @@ import java.net.URI;
  *     
  * </pre></blockquote></p>
  * 
- * @see org.apache.hadoop.mapred.JobConf
- * @see org.apache.hadoop.mapred.JobClient
  */
 public class DistributedCache {
   // cacheID to cacheStatus mapping
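
A minimal sketch of the usage pattern the javadoc above describes, using the old org.apache.hadoop.mapred API; the paths and class names below are illustrative, not part of the commit:

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.filecache.DistributedCache;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.MapReduceBase;

    public class CacheSketch {
      // Client side: register files that must already exist on HDFS
      // (the DistributedCache does not copy them there for you).
      public static void setUpCache(JobConf job) {
        DistributedCache.addCacheFile(URI.create("/myapp/lookup.dat"), job);
        DistributedCache.addCacheArchive(URI.create("/myapp/map.zip"), job);
      }

      // Task side: a Mapper/Reducer picks up the localized copies in configure().
      public static class MapSide extends MapReduceBase {
        private Path[] localFiles;

        @Override
        public void configure(JobConf job) {
          try {
            localFiles = DistributedCache.getLocalCacheFiles(job);
          } catch (IOException e) {
            throw new RuntimeException(e);
          }
        }
      }
    }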

+ 0 - 8
src/java/org/apache/hadoop/io/SequenceFile.java

@@ -221,9 +221,6 @@ public class SequenceFile {
    * Get the compression type for the reduce outputs
    * @param job the job config to look in
    * @return the kind of compression to use
-   * @deprecated Use 
-   *             {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)} 
-   *             to get {@link CompressionType} for job-outputs.
    */
   @Deprecated
   static public CompressionType getCompressionType(Configuration job) {
@@ -236,11 +233,6 @@ public class SequenceFile {
    * Set the compression type for sequence files.
    * @param job the configuration to modify
    * @param val the new compression type (none, block, record)
-   * @deprecated Use the one of the many SequenceFile.createWriter methods to specify
-   *             the {@link CompressionType} while creating the {@link SequenceFile} or
-   *             {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
-   *             to specify the {@link CompressionType} for job-outputs. 
-   * or 
    */
   @Deprecated
   static public void setCompressionType(Configuration job, 
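
The removed @deprecated text pointed callers at SequenceFile.createWriter (or SequenceFileOutputFormat.setOutputCompressionType) to choose a CompressionType. A minimal sketch of the createWriter route; the output path and the Writable key/value types are arbitrary choices for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.SequenceFile.CompressionType;
    import org.apache.hadoop.io.Text;

    public class SeqWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path out = new Path("/tmp/example.seq");  // hypothetical output path

        // The CompressionType is passed to createWriter directly instead of
        // going through the deprecated SequenceFile.setCompressionType(conf, ...).
        SequenceFile.Writer writer = SequenceFile.createWriter(
            fs, conf, out, IntWritable.class, Text.class, CompressionType.BLOCK);
        try {
          writer.append(new IntWritable(1), new Text("value"));
        } finally {
          writer.close();
        }
      }
    }

For job outputs, the equivalent is SequenceFileOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK) on the JobConf.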

+ 2 - 4
src/java/org/apache/hadoop/util/Shell.java

@@ -58,10 +58,8 @@ abstract public class Shell {
   /** 
    * Get the Unix command for setting the maximum virtual memory available
    * to a given child process. This is only relevant when we are forking a
-   * process from within the {@link org.apache.hadoop.mapred.Mapper} or the 
-   * {@link org.apache.hadoop.mapred.Reducer} implementations 
-   * e.g. <a href="{@docRoot}/org/apache/hadoop/mapred/pipes/package-summary.html">Hadoop Pipes</a> 
-   * or <a href="{@docRoot}/org/apache/hadoop/streaming/package-summary.html">Hadoop Streaming</a>.
+   * process from within the Mapper or the Reducer implementations.
+   * see also Hadoop Pipes and Streaming.
    * 
    * It also checks to ensure that we are running on a *nix platform else 
    * (e.g. in Cygwin/Windows) it returns <code>null</code>.
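
The javadoc covers producing a Unix command that caps a child process's virtual memory and returning null on non-*nix platforms. The following illustrates that pattern in plain Java with bash's ulimit; it is not Hadoop's Shell implementation, and the wrapped command and limit value are assumptions made for the example:

    import java.util.Arrays;

    public class UlimitSketch {
      // Wrap a child command so it runs under "ulimit -v <kb>" on *nix;
      // return null where no such limit command is available (e.g. Windows).
      static String[] memoryLimitedCommand(long virtualMemKB, String... cmd) {
        String os = System.getProperty("os.name").toLowerCase();
        if (os.contains("windows")) {
          return null;
        }
        String joined = String.join(" ", cmd);
        return new String[] {
            "bash", "-c", "ulimit -v " + virtualMemKB + "; exec " + joined};
      }

      public static void main(String[] args) {
        // e.g. cap a hypothetical child "sort /tmp/input.txt" at 1 GB of VM.
        System.out.println(Arrays.toString(
            memoryLimitedCommand(1048576, "sort", "/tmp/input.txt")));
      }
    }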

+ 3 - 3
src/java/overview.html

@@ -24,9 +24,9 @@
 Hadoop is a distributed computing platform.
 
 <p>Hadoop primarily consists of the <a 
-href="org/apache/hadoop/hdfs/package-summary.html">Hadoop Distributed FileSystem 
+href="http://hadoop.apache.org/hdfs/">Hadoop Distributed FileSystem 
 (HDFS)</a> and an 
-implementation of the <a href="org/apache/hadoop/mapred/package-summary.html">
+implementation of the <a href="http://hadoop.apache.org/mapreduce/">
 Map-Reduce</a> programming paradigm.</p>
 
 
@@ -153,7 +153,7 @@ specified with the configuration property <tt><a
  href="../core-default.html#fs.default.name">fs.default.name</a></tt>.
 </li>
 
-<li>The {@link org.apache.hadoop.mapred.JobTracker} (MapReduce master)
+<li>The org.apache.hadoop.mapred.JobTracker (MapReduce master)
 host and port.  This is specified with the configuration property
 <tt><a
 href="../mapred-default.html#mapred.job.tracker">mapred.job.tracker</a></tt>.