HDFS-11894. Ozone: Cleanup imports. Contributed by Weiwei Yang.
parent 677dcf529c
commit a8c0976fb4

@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.scm.storage;
 
-import static org.apache.hadoop.scm.storage.ContainerProtocolCalls.*;
-
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.ByteBuffer;

@@ -179,8 +177,8 @@ public class ChunkInputStream extends InputStream {
       throws IOException {
     final ReadChunkResponseProto readChunkResponse;
     try {
-      readChunkResponse = readChunk(xceiverClient, chunks.get(readChunkOffset),
-          key, traceID);
+      readChunkResponse = ContainerProtocolCalls.readChunk(xceiverClient,
+          chunks.get(readChunkOffset), key, traceID);
     } catch (IOException e) {
       throw new IOException("Unexpected OzoneException", e);
     }
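Note: the two ChunkInputStream hunks above drop a wildcard static import of ContainerProtocolCalls and qualify the call instead. A minimal, self-contained sketch of the same idiom (the class and method here are illustrative stand-ins, not Ozone code):

```java
// Stand-in for a utility class like ContainerProtocolCalls.
final class Calls {
  static String readChunk(String key) {
    return "chunk-for-" + key;
  }

  private Calls() { }
}

public class QualifiedCallDemo {
  public static void main(String[] args) {
    // Qualified form: the owning class is visible at the call site,
    // so readers need not scan the imports to resolve readChunk.
    System.out.println(Calls.readChunk("key-1"));
  }
}
```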

@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone;
 import com.google.common.base.Optional;
 
 import com.google.common.net.HostAndPort;
-import org.apache.avro.reflect.Nullable;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;

@@ -585,8 +584,7 @@ public final class OzoneClientUtils {
    * @param conf configuration
    * @return a {@link CloseableHttpClient} instance.
    */
-  public static CloseableHttpClient newHttpClient(
-      @Nullable Configuration conf) {
+  public static CloseableHttpClient newHttpClient(Configuration conf) {
     int socketTimeout = OzoneConfigKeys
         .OZONE_CLIENT_SOCKET_TIMEOUT_MS_DEFAULT;
     int connectionTimeout = OzoneConfigKeys
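Note: the newHttpClient hunk removes the org.apache.avro.reflect.Nullable annotation from the parameter. One common way to keep a null-tolerant contract without the annotation is an explicit check; a hedged sketch (names and the default value are made up, not the real OzoneClientUtils logic):

```java
import java.util.Properties;

public class NullableDemo {
  // Illustrative stand-in: an explicit null check documents the
  // method's contract instead of an annotation.
  static int socketTimeout(Properties conf) {
    final int defaultMs = 5000; // made-up default, not the Ozone value
    if (conf == null) {
      return defaultMs;
    }
    return Integer.parseInt(
        conf.getProperty("socket.timeout.ms", String.valueOf(defaultMs)));
  }

  public static void main(String[] args) {
    System.out.println(socketTimeout(null)); // prints 5000
  }
}
```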

@@ -30,7 +30,11 @@ import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+import java.io.OutputStreamWriter;
+import java.io.FileOutputStream;
 import java.net.URI;
 import java.nio.charset.StandardCharsets;
 import java.util.Scanner;

@@ -22,7 +22,7 @@ package org.apache.hadoop.ozone.web.exceptions;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.ext.ExceptionMapper;
 
-import org.apache.log4j.MDC;
+import org.slf4j.MDC;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
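Note: this hunk switches the mapped diagnostic context from log4j's MDC to the slf4j facade, matching the slf4j Logger the class already uses. A small usage sketch (the key and value are illustrative):

```java
import org.slf4j.MDC;

public class MdcDemo {
  public static void main(String[] args) {
    // org.slf4j.MDC offers the same put/get/remove surface as the
    // log4j MDC, but routes through whichever backend slf4j binds to.
    MDC.put("OZONE_REQUEST", "req-1234");
    try {
      System.out.println(MDC.get("OZONE_REQUEST"));
    } finally {
      MDC.remove("OZONE_REQUEST"); // always clean up thread-local state
    }
  }
}
```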

@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.ozone.web.handlers;
 
-
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
 import org.apache.hadoop.ozone.web.exceptions.OzoneException;

@@ -34,7 +33,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.MDC;
 
-
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Request;
 import javax.ws.rs.core.Response;

@@ -52,9 +50,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RESOURCE;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_REQUEST;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_USER;
 
-
-
-
 /**
  * This class abstracts way the repetitive tasks in
  * Bucket handling code.

@@ -17,12 +17,12 @@
  */
 package org.apache.hadoop.ozone.web.netty;
 
-import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_LENGTH;
-import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
-import static io.netty.handler.codec.http.HttpHeaders.Names.TRANSFER_ENCODING;
-import static io.netty.handler.codec.http.HttpHeaders.Names.HOST;
-import static io.netty.handler.codec.http.HttpHeaders.Values.KEEP_ALIVE;
-import static io.netty.handler.codec.http.HttpHeaders.Values.CLOSE;
+import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION;
+import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH;
+import static io.netty.handler.codec.http.HttpHeaderNames.HOST;
+import static io.netty.handler.codec.http.HttpHeaderNames.TRANSFER_ENCODING;
+import static io.netty.handler.codec.http.HttpHeaderValues.CLOSE;
+import static io.netty.handler.codec.http.HttpHeaderValues.KEEP_ALIVE;
 import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
 
 import java.io.IOException;
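Note: the netty hunk moves from the deprecated HttpHeaders.Names / HttpHeaders.Values constants to HttpHeaderNames / HttpHeaderValues, in sorted order. A minimal sketch, assuming Netty 4.1 on the classpath:

```java
import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION;
import static io.netty.handler.codec.http.HttpHeaderValues.KEEP_ALIVE;

import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpVersion;

public class HeaderNamesDemo {
  public static void main(String[] args) {
    DefaultFullHttpResponse resp = new DefaultFullHttpResponse(
        HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
    // The AsciiString constants replace the deprecated String ones.
    resp.headers().set(CONNECTION, KEEP_ALIVE);
    System.out.println(resp.headers().get(CONNECTION)); // keep-alive
  }
}
```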

@@ -18,11 +18,10 @@
 
 package org.apache.hadoop.ozone.web.request;
 
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.web.headers.Header;
-import org.codehaus.jackson.annotate.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonIgnore;
 
 /**
  * represents an OzoneQuota Object that can be applied to

@@ -18,9 +18,8 @@
 
 package org.apache.hadoop.ozone.web.response;
 
-
+import com.fasterxml.jackson.annotation.JsonInclude;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.codehaus.jackson.map.annotate.JsonSerialize;
 
 /**
  * Volume Owner represents the owner of a volume.

@@ -30,7 +29,7 @@ import org.codehaus.jackson.map.annotate.JsonSerialize;
  */
 @InterfaceAudience.Private
 public class VolumeOwner {
-  @JsonSerialize(include=JsonSerialize.Inclusion.NON_NULL)
+  @JsonInclude(JsonInclude.Include.NON_NULL)
   private String name;
 
   /**
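Note: the two VolumeOwner hunks migrate from the legacy org.codehaus Jackson annotations to com.fasterxml. A small sketch of the equivalent serialization behavior, assuming Jackson 2.x databind:

```java
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.ObjectMapper;

public class JsonIncludeDemo {
  static class Owner {
    // Replaces the codehaus @JsonSerialize(include = Inclusion.NON_NULL):
    // a null field is simply omitted from the serialized output.
    @JsonInclude(JsonInclude.Include.NON_NULL)
    public String name;
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    Owner owner = new Owner();
    System.out.println(mapper.writeValueAsString(owner)); // {}
    owner.name = "bilbo";
    System.out.println(mapper.writeValueAsString(owner)); // {"name":"bilbo"}
  }
}
```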

@@ -53,7 +53,12 @@ import org.apache.hadoop.ozone.web.handlers.KeyArgs;
 import org.apache.hadoop.ozone.web.handlers.ListArgs;
 import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
 import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.response.*;
+import org.apache.hadoop.ozone.web.response.ListVolumes;
+import org.apache.hadoop.ozone.web.response.VolumeInfo;
+import org.apache.hadoop.ozone.web.response.VolumeOwner;
+import org.apache.hadoop.ozone.web.response.ListBuckets;
+import org.apache.hadoop.ozone.web.response.BucketInfo;
+import org.apache.hadoop.ozone.web.response.ListKeys;
 import org.apache.hadoop.scm.XceiverClientSpi;
 import org.apache.hadoop.scm.storage.ChunkInputStream;
 import org.apache.hadoop.scm.storage.ChunkOutputStream;
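Note: the hunk above expands a wildcard type import into explicit imports. One motivation is avoiding name collisions; a classic illustration (not Ozone code):

```java
// With wildcard imports of both java.util.* and java.awt.*, the bare
// name List is ambiguous and the file fails to compile. Explicit
// imports, as in the hunk above, keep every name unambiguous.
import java.util.ArrayList;
import java.util.List;

public class ExplicitImportDemo {
  public static void main(String[] args) {
    List<String> volumes = new ArrayList<>();
    volumes.add("volume-a");
    System.out.println(volumes);
  }
}
```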

@@ -74,9 +79,6 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.stream.Collectors;
 
-import static org.apache.hadoop.ozone.web.storage.OzoneContainerTranslation.*;
-import static org.apache.hadoop.scm.storage.ContainerProtocolCalls.getKey;
-
 /**
  * A {@link StorageHandler} implementation that distributes object storage
  * across the nodes of an HDFS cluster.

@@ -356,10 +358,11 @@ public final class DistributedStorageHandler implements StorageHandler {
     try {
       LOG.debug("get key accessing {} {}",
           xceiverClient.getPipeline().getContainerName(), containerKey);
-      KeyData containerKeyData = containerKeyDataForRead(
+      KeyData containerKeyData = OzoneContainerTranslation
+          .containerKeyDataForRead(
           xceiverClient.getPipeline().getContainerName(), containerKey);
-      GetKeyResponseProto response = getKey(xceiverClient, containerKeyData,
-          args.getRequestID());
+      GetKeyResponseProto response = ContainerProtocolCalls
+          .getKey(xceiverClient, containerKeyData, args.getRequestID());
       long length = 0;
       List<ChunkInfo> chunks = response.getKeyData().getChunksList();
       for (ChunkInfo chunk : chunks) {