
HDFS-11894. Ozone: Cleanup imports. Contributed by Weiwei Yang.

Anu Engineer · 8 years ago
commit 988b25419c

+ 2 - 4
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/storage/ChunkInputStream.java

@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.scm.storage;
 
-import static org.apache.hadoop.scm.storage.ContainerProtocolCalls.*;
-
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.ByteBuffer;
@@ -179,8 +177,8 @@ public class ChunkInputStream extends InputStream {
       throws IOException {
     final ReadChunkResponseProto readChunkResponse;
     try {
-      readChunkResponse = readChunk(xceiverClient, chunks.get(readChunkOffset),
-          key, traceID);
+      readChunkResponse = ContainerProtocolCalls.readChunk(xceiverClient,
+          chunks.get(readChunkOffset), key, traceID);
     } catch (IOException e) {
       throw new IOException("Unexpected OzoneException", e);
     }
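The wildcard static import is dropped here in favor of a fully qualified call. For comparison, a minimal sketch of the other way this cleanup could have gone, an explicit single-member static import that keeps the short call form (same names as the hunk above, call site shown as a fragment only):

    import static org.apache.hadoop.scm.storage.ContainerProtocolCalls.readChunk;

    // ...same call site as the hunk above...
    readChunkResponse = readChunk(xceiverClient,
        chunks.get(readChunkOffset), key, traceID);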

+ 1 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneClientUtils.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone;
 import com.google.common.base.Optional;
 
 import com.google.common.net.HostAndPort;
-import org.apache.avro.reflect.Nullable;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -585,8 +584,7 @@ public final class OzoneClientUtils {
    * @param conf configuration
    * @return a {@link CloseableHttpClient} instance.
    */
-  public static CloseableHttpClient newHttpClient(
-      @Nullable Configuration conf) {
+  public static CloseableHttpClient newHttpClient(Configuration conf) {
     int socketTimeout = OzoneConfigKeys
         .OZONE_CLIENT_SOCKET_TIMEOUT_MS_DEFAULT;
     int connectionTimeout = OzoneConfigKeys
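The removed org.apache.avro.reflect.Nullable was only a marker annotation from an unrelated package; the method signature is otherwise unchanged. A minimal usage sketch, assuming a caller that always supplies a non-null Configuration (the wrapper class and main method below are illustrative, not part of the patch; the return type matches the javadoc above):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.OzoneClientUtils;
    import org.apache.http.impl.client.CloseableHttpClient;

    public final class HttpClientSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();  // always non-null now that @Nullable is gone
        // Build a client using the Ozone timeout settings (defaults shown in the hunk above).
        try (CloseableHttpClient client = OzoneClientUtils.newHttpClient(conf)) {
          // issue REST calls against the Ozone web handlers here
        }
      }
    }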

+ 6 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStorageLocation.java

@@ -30,7 +30,11 @@ import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+import java.io.OutputStreamWriter;
+import java.io.FileOutputStream;
 import java.net.URI;
 import java.nio.charset.StandardCharsets;
 import java.util.Scanner;
@@ -184,4 +188,4 @@ public class ContainerStorageLocation {
       IOUtils.cleanup(null, out);
     }
   }
-}
+}

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/OzoneExceptionMapper.java

@@ -22,7 +22,7 @@ package org.apache.hadoop.ozone.web.exceptions;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.ext.ExceptionMapper;
 
-import org.apache.log4j.MDC;
+import org.slf4j.MDC;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -42,4 +42,4 @@ public class OzoneExceptionMapper implements ExceptionMapper<OzoneException> {
       .entity(exception.toJsonString()).build();
   }
 
-}
+}
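org.slf4j.MDC is a drop-in replacement for the log4j MDC here, since only the basic context calls are involved. A self-contained sketch of the slf4j API in question (the key and value are illustrative, not taken from the patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    import org.slf4j.MDC;

    public final class MdcSketch {
      private static final Logger LOG = LoggerFactory.getLogger(MdcSketch.class);

      public static void main(String[] args) {
        MDC.put("OZONE_COMPONENT", "exception-mapper"); // tag the current thread's context
        try {
          LOG.info("mapping OzoneException to a response"); // layouts can print %X{OZONE_COMPONENT}
        } finally {
          MDC.clear(); // clear so the context does not leak to reused threads
        }
      }
    }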

+ 0 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java

@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.ozone.web.handlers;
 
-
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
 import org.apache.hadoop.ozone.web.exceptions.OzoneException;
@@ -34,7 +33,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.MDC;
 
-
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Request;
 import javax.ws.rs.core.Response;
@@ -52,9 +50,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RESOURCE;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_REQUEST;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_USER;
 
-
-
-
 /**
  * This class abstracts way the repetitive tasks in
  * Bucket handling code.

+ 6 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreJerseyContainer.java

@@ -17,8 +17,12 @@
  */
 package org.apache.hadoop.ozone.web.netty;
 
-import static io.netty.handler.codec.http.HttpHeaders.Names.*;
-import static io.netty.handler.codec.http.HttpHeaders.Values.*;
+import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_LENGTH;
+import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
+import static io.netty.handler.codec.http.HttpHeaders.Names.TRANSFER_ENCODING;
+import static io.netty.handler.codec.http.HttpHeaders.Names.HOST;
+import static io.netty.handler.codec.http.HttpHeaders.Values.KEEP_ALIVE;
+import static io.netty.handler.codec.http.HttpHeaders.Values.CLOSE;
 import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
 
 import java.io.IOException;
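Expanding the two Names/Values wildcards makes it visible that only six header constants are actually used. A minimal sketch of how such constants are typically combined on a Netty response (the helper class below is illustrative, not code from ObjectStoreJerseyContainer):

    import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
    import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_LENGTH;
    import static io.netty.handler.codec.http.HttpHeaders.Values.CLOSE;
    import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;

    import io.netty.handler.codec.http.DefaultFullHttpResponse;
    import io.netty.handler.codec.http.FullHttpResponse;
    import io.netty.handler.codec.http.HttpResponseStatus;

    public final class ResponseSketch {
      // Build an empty response that tells the client to close the connection.
      static FullHttpResponse emptyCloseResponse() {
        FullHttpResponse response =
            new DefaultFullHttpResponse(HTTP_1_1, HttpResponseStatus.NO_CONTENT);
        response.headers().set(CONTENT_LENGTH, 0);
        response.headers().set(CONNECTION, CLOSE);
        return response;
      }
    }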

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/request/OzoneQuota.java

@@ -18,11 +18,10 @@
 
 package org.apache.hadoop.ozone.web.request;
 
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.web.headers.Header;
-import org.codehaus.jackson.annotate.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonIgnore;
 
 /**
  * represents an OzoneQuota Object that can be applied to

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/VolumeOwner.java

@@ -18,9 +18,8 @@
 
 package org.apache.hadoop.ozone.web.response;
 
-
+import com.fasterxml.jackson.annotation.JsonInclude;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.codehaus.jackson.map.annotate.JsonSerialize;
 
 /**
  * Volume Owner represents the owner of a volume.
@@ -30,7 +29,7 @@ import org.codehaus.jackson.map.annotate.JsonSerialize;
  */
 @InterfaceAudience.Private
 public class VolumeOwner {
-  @JsonSerialize(include=JsonSerialize.Inclusion.NON_NULL)
+  @JsonInclude(JsonInclude.Include.NON_NULL)
   private String name;
 
   /**
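These request/response classes move from the org.codehaus annotations to their com.fasterxml equivalents: @JsonSerialize(include = Inclusion.NON_NULL) becomes @JsonInclude(JsonInclude.Include.NON_NULL), and @JsonIgnore keeps its name under the new package. A self-contained sketch of the new annotations in action (the Owner class below is illustrative, not the Ozone type):

    import com.fasterxml.jackson.annotation.JsonIgnore;
    import com.fasterxml.jackson.annotation.JsonInclude;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class Owner {
      @JsonInclude(JsonInclude.Include.NON_NULL)
      private String name;          // dropped from the JSON when null

      @JsonIgnore
      private long internalId;      // never serialized

      public String getName() { return name; }
      public void setName(String name) { this.name = name; }

      public static void main(String[] args) throws Exception {
        // name is still null, so the NON_NULL rule omits it and this prints {}
        System.out.println(new ObjectMapper().writeValueAsString(new Owner()));
      }
    }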

+ 11 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java

@@ -53,7 +53,12 @@ import org.apache.hadoop.ozone.web.handlers.KeyArgs;
 import org.apache.hadoop.ozone.web.handlers.ListArgs;
 import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
 import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.response.*;
+import org.apache.hadoop.ozone.web.response.ListVolumes;
+import org.apache.hadoop.ozone.web.response.VolumeInfo;
+import org.apache.hadoop.ozone.web.response.VolumeOwner;
+import org.apache.hadoop.ozone.web.response.ListBuckets;
+import org.apache.hadoop.ozone.web.response.BucketInfo;
+import org.apache.hadoop.ozone.web.response.ListKeys;
 import org.apache.hadoop.scm.XceiverClientSpi;
 import org.apache.hadoop.scm.storage.ChunkInputStream;
 import org.apache.hadoop.scm.storage.ChunkOutputStream;
@@ -74,9 +79,6 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.stream.Collectors;
 
-import static org.apache.hadoop.ozone.web.storage.OzoneContainerTranslation.*;
-import static org.apache.hadoop.scm.storage.ContainerProtocolCalls.getKey;
-
 /**
  * A {@link StorageHandler} implementation that distributes object storage
  * across the nodes of an HDFS cluster.
@@ -356,10 +358,11 @@ public final class DistributedStorageHandler implements StorageHandler {
     try {
       LOG.debug("get key accessing {} {}",
           xceiverClient.getPipeline().getContainerName(), containerKey);
-      KeyData containerKeyData = containerKeyDataForRead(
-          xceiverClient.getPipeline().getContainerName(), containerKey);
-      GetKeyResponseProto response = getKey(xceiverClient, containerKeyData,
-          args.getRequestID());
+      KeyData containerKeyData = OzoneContainerTranslation
+          .containerKeyDataForRead(
+              xceiverClient.getPipeline().getContainerName(), containerKey);
+      GetKeyResponseProto response = ContainerProtocolCalls
+          .getKey(xceiverClient, containerKeyData, args.getRequestID());
       long length = 0;
       List<ChunkInfo> chunks = response.getKeyData().getChunksList();
       for (ChunkInfo chunk : chunks) {