Browse source code

HDDS-842. [JDK11] Fix Javadoc errors in hadoop-hdds-common module. Contributed by Dinesh Chitlangia.

Giovanni Matteo Fumarola 6 years ago
parent
commit
fcd94eeab8

+ 1 - 1
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java

@@ -104,7 +104,7 @@ public interface ScmClient extends Closeable {
   * Lists a range of containers and get their info.
   *
   * @param startContainerID start containerID.
-   * @param count count must be > 0.
+   * @param count count must be {@literal >} 0.
   *
   * @return a list of pipeline.
   * @throws IOException

+ 1 - 1
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java

@@ -82,7 +82,7 @@ public final class LocatedContainer {
  /**
   * Returns the nodes that currently host the container.
   *
-   * @return Set<DatanodeInfo> nodes that currently host the container
+   * @return {@code Set<DatanodeInfo>} nodes that currently host the container
   */
  public Set<DatanodeInfo> getLocations() {
    return this.locations;

+ 1 - 1
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java

@@ -68,7 +68,7 @@ public final class ScmLocatedBlock {
  /**
   * Returns the nodes that currently host the block.
   *
-   * @return List<DatanodeInfo> nodes that currently host the block
+   * @return {@literal List<DatanodeInfo>} nodes that currently host the block
   */
  public List<DatanodeInfo> getLocations() {
    return this.locations;

+ 1 - 1
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java

@@ -73,7 +73,7 @@ public interface StorageContainerLocationProtocol {
   * searching range cannot exceed the value of count.
   *
   * @param startContainerID start container ID.
-   * @param count count, if count < 0, the max size is unlimited.(
+   * @param count count, if count {@literal <} 0, the max size is unlimited.(
   *              Usually the count will be replace with a very big
   *              value instead of being unlimited in case the db is very big)
   *

+ 5 - 3
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java

@@ -171,7 +171,9 @@ public final class ContainerProtocolCalls  {
   * @param containerBlockData block data to identify container
   * @param traceID container protocol call args
   * @return putBlockResponse
-   * @throws Exception if there is an error while performing the call
+   * @throws IOException if there is an error while performing the call
+   * @throws InterruptedException
+   * @throws ExecutionException
   */
  public static XceiverClientAsyncReply putBlockAsync(
      XceiverClientSpi xceiverClient, BlockData containerBlockData,
@@ -227,7 +229,7 @@ public final class ContainerProtocolCalls  {
   * @param blockID ID of the block
   * @param data the data of the chunk to write
   * @param traceID container protocol call args
-   * @throws Exception if there is an error while performing the call
+   * @throws IOException if there is an error while performing the call
   */
  public static void writeChunk(XceiverClientSpi xceiverClient, ChunkInfo chunk,
      BlockID blockID, ByteString data, String traceID)
@@ -471,7 +473,7 @@ public final class ContainerProtocolCalls  {
   * return code is mapped to a corresponding exception and thrown.
   *
   * @param response container protocol call response
-   * @throws IOException if the container protocol call failed
+   * @throws StorageContainerException if the container protocol call failed
   */
  public static void validateContainerResponse(
      ContainerCommandResponseProto response

+ 1 - 1
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java

@@ -25,7 +25,7 @@ import java.util.Map;
public interface Auditable {
  /**
   * Must override in implementation.
-   * @return Map<String, String> with values to be logged in audit.
+   * @return {@literal Map<String, String>} with values to be logged in audit.
   */
  Map<String, String> toAuditMap();
}