
HADOOP-13942. Build failure due to errors of javadoc build in hadoop-azure. Contributed by Kai Sasaki

Mingliang Liu, 8 years ago
commit c6a5b689db

+ 2 - 1
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/KeyProvider.java

@@ -36,7 +36,8 @@ public interface KeyProvider {
    * @param conf
    *          Hadoop configuration parameters
    * @return the plaintext storage account key
-   * @throws KeyProviderException
+   * @throws KeyProviderException Thrown if there is a problem instantiating a
+   * KeyProvider or retrieving a key using a KeyProvider object.
    */
   String getStorageAccountKey(String accountName, Configuration conf)
       throws KeyProviderException;
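
For orientation, a minimal sketch of a KeyProvider implementation is shown below. It is illustrative only: it assumes the plaintext key is stored directly in the Hadoop configuration under a property named fs.azure.account.key.<account>, and that KeyProviderException offers a String-message constructor.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.azure.KeyProvider;
    import org.apache.hadoop.fs.azure.KeyProviderException;

    // Hypothetical provider, for illustration only: reads the plaintext key
    // straight from the configuration. The property name is an assumption.
    public class ConfigurationKeyProvider implements KeyProvider {
      @Override
      public String getStorageAccountKey(String accountName, Configuration conf)
          throws KeyProviderException {
        String key = conf.get("fs.azure.account.key." + accountName);
        if (key == null) {
          throw new KeyProviderException("No key configured for account " + accountName);
        }
        return key;
      }
    }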

+ 20 - 11
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java

@@ -268,7 +268,8 @@ public class NativeAzureFileSystem extends FileSystem {
      *    "innerFile2"
      *  ]
      * } }</pre>
-     * @throws IOException
+     * @param fs file system to which the file is written.
+     * @throws IOException Thrown if writing the file fails.
      */
     public void writeFile(FileSystem fs) throws IOException {
       Path path = getRenamePendingFilePath();
@@ -292,6 +293,8 @@ public class NativeAzureFileSystem extends FileSystem {
     /**
      * Return the contents of the JSON file to represent the operations
      * to be performed for a folder rename.
+     *
+     * @return JSON string which represents the operation.
      */
     public String makeRenamePendingFileContents() {
       SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
@@ -418,7 +421,7 @@ public class NativeAzureFileSystem extends FileSystem {
      * when everything is working normally. See redo() for the alternate
      * execution path for the case where we're recovering from a folder rename
      * failure.
-     * @throws IOException
+     * @throws IOException Thrown if the rename fails.
      */
     public void execute() throws IOException {
 
@@ -472,7 +475,8 @@ public class NativeAzureFileSystem extends FileSystem {
     }
 
     /** Clean up after execution of rename.
-     * @throws IOException */
+     * @throws IOException Thrown if cleanup fails.
+     */
     public void cleanup() throws IOException {
 
       if (fs.getStoreInterface().isAtomicRenameKey(srcKey)) {
@@ -496,7 +500,7 @@ public class NativeAzureFileSystem extends FileSystem {
      * Recover from a folder rename failure by redoing the intended work,
      * as recorded in the -RenamePending.json file.
      * 
-     * @throws IOException
+     * @throws IOException Thrown if the redo fails.
      */
     public void redo() throws IOException {
 
@@ -1120,6 +1124,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
   /**
    * Creates a new metrics source name that's unique within this process.
+   * @return the new metrics source name
    */
   @VisibleForTesting
   public static String newMetricsSourceName() {
@@ -1253,6 +1258,8 @@ public class NativeAzureFileSystem extends FileSystem {
   /**
    * Convert the path to a key. By convention, any leading or trailing slash is
    * removed, except for the special case of a single slash.
+   * @param path path to be converted to a key
+   * @return key string
    */
   @VisibleForTesting
   public String pathToKey(Path path) {
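
To make that slash convention concrete, these are the mappings implied by the javadoc (illustrative, inferred from the description rather than from running the code):

    // Mappings implied by the javadoc above (illustrative):
    //   pathToKey(new Path("/user/data/")) -> "user/data"   leading/trailing slash removed
    //   pathToKey(new Path("/user/data"))  -> "user/data"
    //   pathToKey(new Path("/"))           -> "/"           single-slash special case
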
@@ -1307,7 +1314,7 @@ public class NativeAzureFileSystem extends FileSystem {
    * Get the absolute version of the path (fully qualified).
    * This is public for testing purposes.
    *
-   * @param path
+   * @param path path to be converted to an absolute path.
    * @return fully qualified path
    */
   @VisibleForTesting
@@ -1415,6 +1422,8 @@ public class NativeAzureFileSystem extends FileSystem {
 
   /**
    * Get a self-renewing lease on the specified file.
+   * @param path path of the file on which to acquire the lease.
+   * @return the self-renewing lease
    */
   public SelfRenewingLease acquireLease(Path path) throws AzureException {
     String fullKey = pathToKey(makeAbsolute(path));
@@ -1662,12 +1671,12 @@ public class NativeAzureFileSystem extends FileSystem {
    * modified time is not necessary, it's easier to just skip
    * the modified time update.
    *
-   * @param f
-   * @param recursive
+   * @param f file path to be deleted.
+   * @param recursive whether to delete recursively.
    * @param skipParentFolderLastModifidedTimeUpdate If true, don't update the folder last
    * modified time.
    * @return true if and only if the file is deleted
-   * @throws IOException
+   * @throws IOException Thrown if deleting the file or directory fails.
    */
   public boolean delete(Path f, boolean recursive,
       boolean skipParentFolderLastModifidedTimeUpdate) throws IOException {
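
A rough usage sketch of this three-argument delete follows. The helper class, the paths, and the assumption that fs.defaultFS points at a wasb:// file system are all illustrative.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.azure.NativeAzureFileSystem;

    // Hypothetical helper: delete a batch of temporary files while skipping the
    // parent folder's last-modified-time update on each individual call.
    class TempFileCleaner {
      static void deleteAll(Configuration conf, Path... tempFiles) throws IOException {
        NativeAzureFileSystem fs = (NativeAzureFileSystem) FileSystem.get(conf);
        for (Path tmp : tempFiles) {
          fs.delete(tmp, false, true); // non-recursive, skip parent timestamp update
        }
      }
    }
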
@@ -2890,7 +2899,7 @@ public class NativeAzureFileSystem extends FileSystem {
    *          The root path to consider.
    * @param destination
    *          The destination path to move any recovered files to.
-   * @throws IOException
+   * @throws IOException Thrown if recovering the files fails.
    */
   public void recoverFilesWithDanglingTempData(Path root, Path destination)
       throws IOException {
@@ -2908,7 +2917,7 @@ public class NativeAzureFileSystem extends FileSystem {
    * 
    * @param root
    *          The root path to consider.
-   * @throws IOException
+   * @throws IOException Thrown if the delete fails.
    */
   public void deleteFilesWithDanglingTempData(Path root) throws IOException {
 
@@ -2928,7 +2937,7 @@ public class NativeAzureFileSystem extends FileSystem {
    * Upload data to a random temporary file then do storage side renaming to
    * recover the original key.
    * 
-   * @param aKey
+   * @param aKey a key to be encoded.
    * @return Encoded version of the original key.
    */
   private static String encodeKey(String aKey) {

+ 3 - 2
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java

@@ -93,7 +93,8 @@ interface NativeFileSystemStore {
   /**
    * Delete all keys with the given prefix. Used for testing.
    *
-   * @throws IOException
+   * @param prefix prefix of the keys to be deleted.
+   * @throws IOException Exception encountered while deleting keys.
    */
   @VisibleForTesting
   void purge(String prefix) throws IOException;
@@ -101,7 +102,7 @@ interface NativeFileSystemStore {
   /**
    * Diagnostic method to dump state to the console.
    *
-   * @throws IOException
+   * @throws IOException Exception encountered while dumping to console.
    */
   void dump() throws IOException;
 

+ 4 - 2
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SASKeyGeneratorInterface.java

@@ -41,7 +41,8 @@ public interface SASKeyGeneratorInterface {
    * @param container
    *          - Container name within the storage account.
    * @return SAS URI for the container.
-   * @throws SASKeyGenerationException
+   * @throws SASKeyGenerationException Exception that gets thrown during
+   * generation of SAS Key.
    */
   URI getContainerSASUri(String accountName, String container)
       throws SASKeyGenerationException;
@@ -57,7 +58,8 @@ public interface SASKeyGeneratorInterface {
    * @param relativePath
    *          - Relative path within the container
    * @return SAS URI for the relative path blob.
-   * @throws SASKeyGenerationException
+   * @throws SASKeyGenerationException Exception that gets thrown during
+   * generation of SAS Key.
    */
   URI getRelativeBlobSASUri(String accountName, String container,
       String relativePath) throws SASKeyGenerationException;
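
For context, a minimal sketch of calling the two SAS methods. The account, container, and blob names are placeholders, and the sketch assumes the interface and SASKeyGenerationException are visible to the caller.

    import java.net.URI;
    import org.apache.hadoop.fs.azure.SASKeyGenerationException;
    import org.apache.hadoop.fs.azure.SASKeyGeneratorInterface;

    // Illustrative only: obtain SAS URIs for a container and for one blob in it.
    class SasUriExample {
      static URI[] sasUris(SASKeyGeneratorInterface generator)
          throws SASKeyGenerationException {
        URI container = generator.getContainerSASUri("myaccount", "mycontainer");
        URI blob = generator.getRelativeBlobSASUri("myaccount", "mycontainer",
            "data/part-00000");
        return new URI[] { container, blob };
      }
    }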

+ 1 - 1
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java

@@ -111,7 +111,7 @@ public class SelfRenewingLease {
 
   /**
    * Free the lease and stop the keep-alive thread.
-   * @throws StorageException
+   * @throws StorageException Thrown if freeing the lease fails.
    */
   public void free() throws StorageException {
     AccessCondition accessCondition = AccessCondition.generateEmptyCondition();
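
Taken together with NativeAzureFileSystem#acquireLease above, a hedged usage sketch; the helper class and the path argument are illustrative, not part of the codebase.

    import com.microsoft.azure.storage.StorageException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.azure.AzureException;
    import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
    import org.apache.hadoop.fs.azure.SelfRenewingLease;

    // Hypothetical helper: run a critical section under a self-renewing lease and
    // always free the lease afterwards so its keep-alive thread stops.
    class LeaseExample {
      static void withLease(NativeAzureFileSystem fs, Path file, Runnable criticalSection)
          throws AzureException, StorageException {
        SelfRenewingLease lease = fs.acquireLease(file);
        try {
          criticalSection.run();
        } finally {
          lease.free();
        }
      }
    }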

+ 2 - 1
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java

@@ -94,7 +94,8 @@ public final class SendRequestIntercept extends StorageEvent<SendingRequestEvent
   * Binds a new listener to the operation context so the WASB file system can
    * appropriately intercept sends. By allowing concurrent OOB I/Os, we bypass
    * the blob immutability check when reading streams.
-   * 
+   *
+   * @param storageCreds The credentials for the blob storage account.
    * @param opContext
    *          The operation context to bind to listener.
    * 

+ 17 - 11
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java

@@ -72,7 +72,8 @@ abstract class StorageInterface {
    * property, so that all subsequent requests made via the service client will
    * use the new timeout interval. You can also change this value for an
    * individual request, by setting the
-   * {@link RequestOptions#timeoutIntervalInMs} property.
+   * {@link com.microsoft.azure.storage.RequestOptions#timeoutIntervalInMs}
+   * property.
    * 
    * If you are downloading a large blob, you should increase the value of the
    * timeout beyond the default value.
@@ -94,7 +95,8 @@ abstract class StorageInterface {
 
   /**
    * Creates a new Blob service client.
-   * 
+   *
+   * @param account cloud storage account.
    */
   public abstract void createBlobClient(CloudStorageAccount account);
 
@@ -149,8 +151,9 @@ abstract class StorageInterface {
       throws URISyntaxException, StorageException;
 
   /**
-   * A thin wrapper over the {@link CloudBlobDirectory} class that simply
-   * redirects calls to the real object except in unit tests.
+   * A thin wrapper over the
+   * {@link com.microsoft.azure.storage.blob.CloudBlobDirectory} class
+   * that simply redirects calls to the real object except in unit tests.
    */
   @InterfaceAudience.Private
   public abstract static class CloudBlobDirectoryWrapper implements
@@ -184,7 +187,7 @@ abstract class StorageInterface {
      *          A {@link BlobRequestOptions} object that specifies any
      *          additional options for the request. Specifying <code>null</code>
      *          will use the default request options from the associated service
-     *          client ( {@link CloudBlobClient}).
+     *          client ({@link com.microsoft.azure.storage.blob.CloudBlobClient}).
      * @param opContext
      *          An {@link OperationContext} object that represents the context
      *          for the current operation. This object is used to track requests
@@ -207,8 +210,9 @@ abstract class StorageInterface {
   }
 
   /**
-   * A thin wrapper over the {@link CloudBlobContainer} class that simply
-   * redirects calls to the real object except in unit tests.
+   * A thin wrapper over the
+   * {@link com.microsoft.azure.storage.blob.CloudBlobContainer} class
+   * that simply redirects calls to the real object except in unit tests.
    */
   @InterfaceAudience.Private
   public abstract static class CloudBlobContainerWrapper {
@@ -608,8 +612,9 @@ abstract class StorageInterface {
   }
 
   /**
-   * A thin wrapper over the {@link CloudBlockBlob} class that simply redirects calls
-   * to the real object except in unit tests.
+   * A thin wrapper over the
+   * {@link com.microsoft.azure.storage.blob.CloudBlockBlob} class
+   * that simply redirects calls to the real object except in unit tests.
    */
   public abstract interface CloudBlockBlobWrapper
       extends CloudBlobWrapper {
@@ -690,8 +695,9 @@ abstract class StorageInterface {
   }
 
   /**
-   * A thin wrapper over the {@link CloudPageBlob} class that simply redirects calls
-   * to the real object except in unit tests.
+   * A thin wrapper over the
+   * {@link com.microsoft.azure.storage.blob.CloudPageBlob}
+   * class that simply redirects calls to the real object except in unit tests.
    */
   public abstract interface CloudPageBlobWrapper
       extends CloudBlobWrapper {
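
The "thin wrapper" javadoc above describes a common test-seam pattern. A generic, hypothetical illustration (not part of StorageInterface) of how such a wrapper delegates to the real SDK object while letting unit tests substitute a mock:

    import com.microsoft.azure.storage.StorageException;
    import com.microsoft.azure.storage.blob.CloudBlobContainer;

    // Hypothetical sketch of the wrapper pattern: production code delegates every
    // call to the real SDK object; a test double overrides the delegating method.
    abstract class ContainerWrapperSketch {
      // The real container this wrapper delegates to.
      protected abstract CloudBlobContainer getContainer();

      // Delegating call, assuming the SDK's exists() signature; a unit test can
      // override this without contacting Azure storage at all.
      public boolean exists() throws StorageException {
        return getContainer().exists();
      }
    }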

+ 2 - 0
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/AzureFileSystemInstrumentation.java

@@ -187,6 +187,7 @@ public final class AzureFileSystemInstrumentation implements MetricsSource {
 
   /**
    * The unique identifier for this file system in the metrics.
+   * @return The unique identifier.
    */
   public UUID getFileSystemInstanceId() {
     return fileSystemInstanceId;
@@ -194,6 +195,7 @@ public final class AzureFileSystemInstrumentation implements MetricsSource {
   
   /**
    * Get the metrics registry information.
+   * @return The metrics registry information.
    */
   public MetricsInfo getMetricsRegistryInfo() {
     return registry.info();