HDFS-12549. Ozone: OzoneClient: Support for REST protocol. Contributed by Nanda Kumar.

Xiaoyu Yao 2017-11-08 15:31:06 -08:00 committed by Owen O'Malley
parent 9734f505ea
commit cec96b296f
27 changed files with 1905 additions and 55 deletions

View File

@ -86,7 +86,7 @@ public static OzoneAcl parseAcl(String acl) throws IllegalArgumentException {
@Override
public String toString() {
return type+":" + name + ":" + rights;
return type + ":" + name + ":" + OzoneACLRights.getACLRightsString(rights);
}
/**
@ -207,5 +207,25 @@ public static OzoneACLRights getACLRight(String type) {
}
}
/**
* Returns String representation of ACL rights.
* @param acl OzoneACLRights
* @return String representation of acl
*/
public static String getACLRightsString(OzoneACLRights acl) {
switch(acl) {
case READ:
return OzoneConsts.OZONE_ACL_READ;
case WRITE:
return OzoneConsts.OZONE_ACL_WRITE;
case READ_WRITE:
return OzoneConsts.OZONE_ACL_READ_WRITE;
default:
throw new IllegalArgumentException("ACL right is not recognized");
}
}
}
}
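With toString() now emitting rights through getACLRightsString, an ACL serializes to the same compact form that parseAcl consumes. A minimal sketch, assuming the constructor and enum constants used elsewhere in this patch; the exact rights token comes from OzoneConsts, so the printed output is only indicative.

import org.apache.hadoop.ozone.OzoneAcl;

public class OzoneAclToStringSketch {
  public static void main(String[] args) {
    // Constructor and enum constants as used later in this patch (RestClient).
    OzoneAcl acl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, "bilbo",
        OzoneAcl.OzoneACLRights.READ_WRITE);
    // Prints type:name:rights, e.g. something like "USER:bilbo:rw";
    // the rights token is whatever OzoneConsts.OZONE_ACL_READ_WRITE holds.
    System.out.println(acl.toString());
  }
}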

View File

@ -116,6 +116,23 @@ public final class OzoneConfigKeys {
public static final Class<? extends ClientProtocol>
OZONE_CLIENT_PROTOCOL_REST = RestClient.class;
public static final String OZONE_REST_SERVERS = "ozone.rest.servers";
public static final String OZONE_REST_CLIENT_PORT = "ozone.rest.client.port";
public static final int OZONE_REST_CLIENT_PORT_DEFAULT = 9864;
// This defines the overall connection limit for the connection pool used in
// RestClient.
public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_MAX =
"ozone.rest.client.http.connection.max";
public static final int OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT = 100;
// This defines the connection limit per HTTP route/host.
public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX =
"ozone.rest.client.http.connection.per-route.max";
public static final int
OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT = 20;
public static final String OZONE_CLIENT_SOCKET_TIMEOUT_MS =
"ozone.client.socket.timeout.ms";
public static final int OZONE_CLIENT_SOCKET_TIMEOUT_MS_DEFAULT = 5000;
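A minimal sketch of wiring the new REST client keys into a Configuration before constructing the RestClient introduced later in this patch; the host names are placeholders, and in a real deployment these values would normally come from ozone-site.xml (see the ozone-default.xml additions below).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.client.rest.RestClient;

public class RestClientConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Comma-separated REST endpoints; RestClient picks one of them at random.
    conf.set("ozone.rest.servers", "dn1.example.com,dn2.example.com"); // placeholders
    conf.setInt("ozone.rest.client.port", 9864);
    // Connection-pool sizing for the shared HTTP connection manager.
    conf.setInt("ozone.rest.client.http.connection.max", 100);
    conf.setInt("ozone.rest.client.http.connection.per-route.max", 20);
    conf.setInt("ozone.client.socket.timeout.ms", 5000);

    RestClient client = new RestClient(conf);
    client.createVolume("sketch-volume");
    client.close();
  }
}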

View File

@ -20,6 +20,7 @@
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.net.HostAndPort;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -38,6 +39,11 @@
import org.slf4j.LoggerFactory;
import java.net.InetSocketAddress;
import java.text.ParseException;
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
@ -97,6 +103,16 @@ public final class OzoneClientUtils {
OzoneClientUtils.class);
private static final int NO_PORT = -1;
/**
* Date format used in ozone. The formatter is held in a ThreadLocal, so it is safe to use across threads.
*/
private static final ThreadLocal<DateTimeFormatter> DATE_FORMAT =
ThreadLocal.withInitial(() -> {
DateTimeFormatter format =
DateTimeFormatter.ofPattern(OzoneConsts.OZONE_DATE_FORMAT);
return format.withZone(ZoneId.of(OzoneConsts.OZONE_TIME_ZONE));
});
/**
* The service ID of the solitary Ozone SCM service.
*/
@ -822,4 +838,24 @@ public static void verifyResourceName(String resName)
"Bucket or Volume name cannot be an IPv4 address or all numeric");
}
}
/**
* Convert time in milliseconds to the human readable date format used in ozone.
* @param millis time in epoch milliseconds
* @return a human readable string for the input time
*/
public static String formatDateTime(long millis) {
ZonedDateTime dateTime = ZonedDateTime.ofInstant(
Instant.ofEpochMilli(millis), DATE_FORMAT.get().getZone());
return DATE_FORMAT.get().format(dateTime);
}
/**
* Convert a time string in the ozone date format back to milliseconds.
* @param date date string in the ozone date format
* @return time in milliseconds
*/
public static long formatDateTime(String date) throws ParseException {
Preconditions.checkNotNull(date, "Date string should not be null.");
return ZonedDateTime.parse(date, DATE_FORMAT.get())
.toInstant().toEpochMilli();
}
}
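A small round-trip sketch of the two helpers above, assuming the millisecond-based conversion that the javadoc and the millis parameter describe; the pattern and time zone come from OzoneConsts, so sub-second precision is dropped on the way back.

import org.apache.hadoop.ozone.client.OzoneClientUtils;

public class OzoneDateFormatSketch {
  public static void main(String[] args) throws Exception {
    long now = System.currentTimeMillis();
    // Epoch millis -> the human readable Ozone date format.
    String formatted = OzoneClientUtils.formatDateTime(now);
    // And back to epoch millis, truncated to whole seconds by the pattern.
    long parsed = OzoneClientUtils.formatDateTime(formatted);
    System.out.println(formatted + " -> " + parsed);
  }
}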

View File

@ -33,7 +33,7 @@ public class OzoneQuota {
public static final String OZONE_QUOTA_TB = "TB";
private Units unit;
private int size;
private long size;
/** Quota Units.*/
public enum Units {UNDEFINED, BYTES, KB, MB, GB, TB}
@ -41,9 +41,9 @@ public enum Units {UNDEFINED, BYTES, KB, MB, GB, TB}
/**
* Returns size.
*
* @return int
* @return long
*/
public int getSize() {
public long getSize() {
return size;
}
@ -67,10 +67,10 @@ public OzoneQuota() {
/**
* Constructor for Ozone Quota.
*
* @param size - Integer Size
* @param size Long Size
* @param unit MB, GB or TB
*/
public OzoneQuota(int size, Units unit) {
public OzoneQuota(long size, Units unit) {
this.size = size;
this.unit = unit;
}
@ -195,4 +195,9 @@ public static OzoneQuota getOzoneQuota(long sizeInBytes) {
}
return new OzoneQuota((int)size, unit);
}
@Override
public String toString() {
return size + " " + unit;
}
}
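With size widened to long and toString() added, a quota now survives a parse/print round trip. A short sketch using the existing parseQuota factory (the same one the new REST test below calls); the "10 GB" literal is just an example value.

import org.apache.hadoop.ozone.client.OzoneQuota;

public class OzoneQuotaSketch {
  public static void main(String[] args) {
    OzoneQuota quota = OzoneQuota.parseQuota("10 GB");
    // getSize() now returns long; toString() prints "<size> <unit>".
    System.out.println(quota.getSize() + " -> " + quota);
  }
}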

View File

@ -28,14 +28,14 @@
*/
public class OzoneInputStream extends InputStream {
private final ChunkGroupInputStream inputStream;
private final InputStream inputStream;
/**
* Constructs OzoneInputStream that wraps the given InputStream.
*
* @param inputStream the stream to delegate reads to
*/
public OzoneInputStream(ChunkGroupInputStream inputStream) {
public OzoneInputStream(InputStream inputStream) {
this.inputStream = inputStream;
}

View File

@ -26,14 +26,14 @@
*/
public class OzoneOutputStream extends OutputStream {
private final ChunkGroupOutputStream outputStream;
private final OutputStream outputStream;
/**
* Constructs OzoneOutputStream that wraps the given OutputStream.
*
* @param outputStream the stream to delegate writes to
*/
public OzoneOutputStream(ChunkGroupOutputStream outputStream) {
public OzoneOutputStream(OutputStream outputStream) {
this.outputStream = outputStream;
}
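Relaxing the wrapped type from ChunkGroup*Stream to plain java.io streams is what allows the REST client below to hand its piped streams to these wrappers. A tiny sketch of the relaxed constructors; the ByteArray streams are only stand-ins for whatever stream a protocol implementation supplies.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

public class OzoneStreamWrapperSketch {
  public static void main(String[] args) throws Exception {
    // Any OutputStream can now back an OzoneOutputStream.
    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    OzoneOutputStream out = new OzoneOutputStream(sink);
    out.write("hello ozone".getBytes(StandardCharsets.UTF_8));
    out.close();

    // Likewise, any InputStream can back an OzoneInputStream.
    OzoneInputStream in = new OzoneInputStream(
        new ByteArrayInputStream(sink.toByteArray()));
    System.out.println((char) in.read()); // 'h'
    in.close();
  }
}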

View File

@ -22,26 +22,81 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneQuota;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.ReplicationFactor;
import org.apache.hadoop.ozone.client.ReplicationType;
import org.apache.hadoop.ozone.client.VolumeArgs;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import java.io.IOException;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
import org.apache.hadoop.ozone.client.rest.response.KeyInfo;
import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;
import org.apache.hadoop.ozone.client.rpc.RpcClient;
import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHeaders;
import org.apache.http.HttpResponse;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.FutureTask;
import static java.net.HttpURLConnection.HTTP_CREATED;
import static java.net.HttpURLConnection.HTTP_OK;
/**
* Ozone Client REST protocol implementation. It uses REST protocol to
* connect to Ozone Handler that executes client calls
connect to the Ozone Handler that executes client calls. RestClient uses
<code>ozone.rest.servers</code> and <code>ozone.rest.client.port</code>
to discover the Ozone REST server.
*/
public class RestClient implements ClientProtocol {
private static final String PATH_SEPARATOR = "/";
private static final Logger LOG = LoggerFactory.getLogger(RestClient.class);
private final Configuration conf;
private final URI ozoneRestUri;
private final CloseableHttpClient httpClient;
private final UserGroupInformation ugi;
private final OzoneAcl.OzoneACLRights userRights;
/**
* Creates RestClient instance with the given configuration.
* @param conf Configuration
@ -49,37 +104,186 @@ public class RestClient implements ClientProtocol {
*/
public RestClient(Configuration conf)
throws IOException {
Preconditions.checkNotNull(conf);
try {
Preconditions.checkNotNull(conf);
this.conf = conf;
int port = conf.getInt(OzoneConfigKeys.OZONE_REST_CLIENT_PORT,
OzoneConfigKeys.OZONE_REST_CLIENT_PORT_DEFAULT);
URIBuilder uriBuilder = new URIBuilder()
.setScheme("http")
.setHost(getOzoneRestHandlerHost())
.setPort(port);
this.ozoneRestUri = uriBuilder.build();
int socketTimeout = conf.getInt(
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_MS,
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_MS_DEFAULT);
int connectionTimeout = conf.getInt(
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_MS,
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_MS_DEFAULT);
int maxConnection = conf.getInt(
OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_MAX,
OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT);
int maxConnectionPerRoute = conf.getInt(
OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX,
OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT
);
/*
To make RestClient thread safe, the HttpClient is created with a
PoolingHttpClientConnectionManager, which is safe for concurrent use.
*/
PoolingHttpClientConnectionManager connManager =
new PoolingHttpClientConnectionManager();
connManager.setMaxTotal(maxConnection);
connManager.setDefaultMaxPerRoute(maxConnectionPerRoute);
this.httpClient = HttpClients.custom()
.setConnectionManager(connManager)
.setDefaultRequestConfig(
RequestConfig.custom()
.setSocketTimeout(socketTimeout)
.setConnectTimeout(connectionTimeout)
.build())
.build();
this.ugi = UserGroupInformation.getCurrentUser();
this.userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS,
KSMConfigKeys.OZONE_KSM_USER_RIGHTS_DEFAULT);
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
/**
* Returns the REST server host to connect to.
*
* @return hostname of REST server
*/
private String getOzoneRestHandlerHost() {
List<String> servers = new ArrayList<>(conf.getTrimmedStringCollection(
OzoneConfigKeys.OZONE_REST_SERVERS));
if(servers.isEmpty()) {
throw new IllegalArgumentException(OzoneConfigKeys.OZONE_REST_SERVERS +
" must be defined. See" +
" https://wiki.apache.org/hadoop/Ozone#Configuration for" +
" details on configuring Ozone.");
}
return servers.get(new Random().nextInt(servers.size()));
}
@Override
public void createVolume(String volumeName) throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
createVolume(volumeName, VolumeArgs.newBuilder().build());
}
@Override
public void createVolume(
String volumeName, org.apache.hadoop.ozone.client.VolumeArgs args)
public void createVolume(String volumeName, VolumeArgs volArgs)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
try {
Preconditions.checkNotNull(volumeName);
URIBuilder builder = new URIBuilder(ozoneRestUri);
String owner = volArgs.getOwner() == null ?
ugi.getUserName() : volArgs.getOwner();
//TODO: support for ACLs has to be done in OzoneHandler (rest server)
/**
List<OzoneAcl> listOfAcls = new ArrayList<>();
//User ACL
listOfAcls.add(new OzoneAcl(OzoneAcl.OzoneACLType.USER,
owner, userRights));
//ACLs from VolumeArgs
if(volArgs.getAcls() != null) {
listOfAcls.addAll(volArgs.getAcls());
}
*/
builder.setPath(PATH_SEPARATOR + volumeName);
String quota = volArgs.getQuota();
if(quota != null) {
builder.setParameter(Header.OZONE_QUOTA_QUERY_TAG, quota);
}
HttpPost httpPost = new HttpPost(builder.build());
addOzoneHeaders(httpPost);
//use admin from VolumeArgs, if it's present
if(volArgs.getAdmin() != null) {
httpPost.removeHeaders(HttpHeaders.AUTHORIZATION);
httpPost.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
volArgs.getAdmin());
}
httpPost.addHeader(Header.OZONE_USER, owner);
LOG.info("Creating Volume: {}, with {} as owner and quota set to {}.",
volumeName, owner, quota == null ? "default" : quota);
EntityUtils.consume(executeHttpRequest(httpPost));
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
@Override
public void setVolumeOwner(String volumeName, String owner)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
try {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(owner);
URIBuilder builder = new URIBuilder(ozoneRestUri);
builder.setPath(PATH_SEPARATOR + volumeName);
HttpPut httpPut = new HttpPut(builder.build());
addOzoneHeaders(httpPut);
httpPut.addHeader(Header.OZONE_USER, owner);
EntityUtils.consume(executeHttpRequest(httpPut));
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
@Override
public void setVolumeQuota(String volumeName, OzoneQuota quota)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
try {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(quota);
String quotaString = quota.toString();
URIBuilder builder = new URIBuilder(ozoneRestUri);
builder.setPath(PATH_SEPARATOR + volumeName);
builder.setParameter(Header.OZONE_QUOTA_QUERY_TAG, quotaString);
HttpPut httpPut = new HttpPut(builder.build());
addOzoneHeaders(httpPut);
EntityUtils.consume(executeHttpRequest(httpPut));
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
@Override
public OzoneVolume getVolumeDetails(String volumeName)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
try {
Preconditions.checkNotNull(volumeName);
URIBuilder builder = new URIBuilder(ozoneRestUri);
builder.setPath(PATH_SEPARATOR + volumeName);
builder.setParameter(Header.OZONE_INFO_QUERY_TAG,
Header.OZONE_INFO_QUERY_VOLUME);
HttpGet httpGet = new HttpGet(builder.build());
addOzoneHeaders(httpGet);
HttpEntity response = executeHttpRequest(httpGet);
VolumeInfo volInfo =
VolumeInfo.parse(EntityUtils.toString(response));
//TODO: OzoneHandler in datanode has to be modified to send ACLs
OzoneVolume volume = new OzoneVolume(conf,
this,
volInfo.getVolumeName(),
volInfo.getCreatedBy(),
volInfo.getOwner().getName(),
volInfo.getQuota().sizeInBytes(),
OzoneClientUtils.formatDateTime(volInfo.getCreatedOn()),
null);
EntityUtils.consume(response);
return volume;
} catch (URISyntaxException | ParseException e) {
throw new IOException(e);
}
}
@Override
@ -90,7 +294,16 @@ public boolean checkVolumeAccess(String volumeName, OzoneAcl acl)
@Override
public void deleteVolume(String volumeName) throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
try {
Preconditions.checkNotNull(volumeName);
URIBuilder builder = new URIBuilder(ozoneRestUri);
builder.setPath(PATH_SEPARATOR + volumeName);
HttpDelete httpDelete = new HttpDelete(builder.build());
addOzoneHeaders(httpDelete);
EntityUtils.consume(executeHttpRequest(httpDelete));
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
@Override
@ -110,48 +323,161 @@ public List<OzoneVolume> listVolumes(String user, String volumePrefix,
@Override
public void createBucket(String volumeName, String bucketName)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
createBucket(volumeName, bucketName, BucketArgs.newBuilder().build());
}
@Override
public void createBucket(
String volumeName, String bucketName, BucketArgs bucketArgs)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
try {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(bucketArgs);
URIBuilder builder = new URIBuilder(ozoneRestUri);
OzoneConsts.Versioning versioning = OzoneConsts.Versioning.DISABLED;
if(bucketArgs.getVersioning() != null &&
bucketArgs.getVersioning()) {
versioning = OzoneConsts.Versioning.ENABLED;
}
StorageType storageType = bucketArgs.getStorageType() == null ?
StorageType.DEFAULT : bucketArgs.getStorageType();
builder.setPath(PATH_SEPARATOR + volumeName +
PATH_SEPARATOR + bucketName);
HttpPost httpPost = new HttpPost(builder.build());
addOzoneHeaders(httpPost);
//ACLs from BucketArgs
if(bucketArgs.getAcls() != null) {
for (OzoneAcl acl : bucketArgs.getAcls()) {
httpPost.addHeader(
Header.OZONE_ACLS, Header.OZONE_ACL_ADD + " " + acl.toString());
}
}
httpPost.addHeader(Header.OZONE_STORAGE_TYPE, storageType.toString());
httpPost.addHeader(Header.OZONE_BUCKET_VERSIONING,
versioning.toString());
LOG.info("Creating Bucket: {}/{}, with Versioning {} and Storage Type" +
" set to {}", volumeName, bucketName, versioning,
storageType);
EntityUtils.consume(executeHttpRequest(httpPost));
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
@Override
public void addBucketAcls(
String volumeName, String bucketName, List<OzoneAcl> addAcls)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
try {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(addAcls);
URIBuilder builder = new URIBuilder(ozoneRestUri);
builder.setPath(PATH_SEPARATOR + volumeName +
PATH_SEPARATOR + bucketName);
HttpPut httpPut = new HttpPut(builder.build());
addOzoneHeaders(httpPut);
for (OzoneAcl acl : addAcls) {
httpPut.addHeader(
Header.OZONE_ACLS, Header.OZONE_ACL_ADD + " " + acl.toString());
}
EntityUtils.consume(executeHttpRequest(httpPut));
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
@Override
public void removeBucketAcls(
String volumeName, String bucketName, List<OzoneAcl> removeAcls)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
try {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(removeAcls);
URIBuilder builder = new URIBuilder(ozoneRestUri);
builder.setPath(PATH_SEPARATOR + volumeName +
PATH_SEPARATOR + bucketName);
HttpPut httpPut = new HttpPut(builder.build());
addOzoneHeaders(httpPut);
for (OzoneAcl acl : removeAcls) {
httpPut.addHeader(
Header.OZONE_ACLS, Header.OZONE_ACL_REMOVE + " " + acl.toString());
}
EntityUtils.consume(executeHttpRequest(httpPut));
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
@Override
public void setBucketVersioning(
String volumeName, String bucketName, Boolean versioning)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
try {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(versioning);
URIBuilder builder = new URIBuilder(ozoneRestUri);
builder.setPath(PATH_SEPARATOR + volumeName +
PATH_SEPARATOR + bucketName);
HttpPut httpPut = new HttpPut(builder.build());
addOzoneHeaders(httpPut);
httpPut.addHeader(Header.OZONE_BUCKET_VERSIONING,
getBucketVersioning(versioning).toString());
EntityUtils.consume(executeHttpRequest(httpPut));
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
@Override
public void setBucketStorageType(
String volumeName, String bucketName, StorageType storageType)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
try {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(storageType);
URIBuilder builder = new URIBuilder(ozoneRestUri);
builder.setPath(PATH_SEPARATOR + volumeName +
PATH_SEPARATOR + bucketName);
HttpPut httpPut = new HttpPut(builder.build());
addOzoneHeaders(httpPut);
httpPut.addHeader(Header.OZONE_STORAGE_TYPE, storageType.toString());
EntityUtils.consume(executeHttpRequest(httpPut));
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
@Override
public void deleteBucket(String volumeName, String bucketName)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
try {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
URIBuilder builder = new URIBuilder(ozoneRestUri);
builder.setPath(PATH_SEPARATOR + volumeName +
PATH_SEPARATOR + bucketName);
HttpDelete httpDelete = new HttpDelete(builder.build());
addOzoneHeaders(httpDelete);
EntityUtils.consume(executeHttpRequest(httpDelete));
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
@Override
@ -163,7 +489,32 @@ public void checkBucketAccess(String volumeName, String bucketName)
@Override
public OzoneBucket getBucketDetails(String volumeName, String bucketName)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
try {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
URIBuilder builder = new URIBuilder(ozoneRestUri);
builder.setPath(PATH_SEPARATOR + volumeName +
PATH_SEPARATOR + bucketName);
builder.setParameter(Header.OZONE_INFO_QUERY_TAG,
Header.OZONE_INFO_QUERY_BUCKET);
HttpGet httpGet = new HttpGet(builder.build());
addOzoneHeaders(httpGet);
HttpEntity response = executeHttpRequest(httpGet);
BucketInfo bucketInfo =
BucketInfo.parse(EntityUtils.toString(response));
OzoneBucket bucket = new OzoneBucket(conf,
this,
bucketInfo.getVolumeName(),
bucketInfo.getBucketName(),
bucketInfo.getAcls(),
bucketInfo.getStorageType(),
getBucketVersioningFlag(bucketInfo.getVersioning()),
OzoneClientUtils.formatDateTime(bucketInfo.getCreatedOn()));
EntityUtils.consume(response);
return bucket;
} catch (URISyntaxException | ParseException e) {
throw new IOException(e);
}
}
@Override
@ -188,20 +539,109 @@ public OzoneOutputStream createKey(
String volumeName, String bucketName, String keyName, long size,
ReplicationType type, ReplicationFactor factor)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
// TODO: Once ReplicationType and ReplicationFactor are supported in
// OzoneHandler (in Datanode), set them in header.
try {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(keyName);
URIBuilder builder = new URIBuilder(ozoneRestUri);
builder.setPath(PATH_SEPARATOR + volumeName +
PATH_SEPARATOR + bucketName +
PATH_SEPARATOR + keyName);
HttpPut putRequest = new HttpPut(builder.build());
addOzoneHeaders(putRequest);
PipedInputStream in = new PipedInputStream();
OutputStream out = new PipedOutputStream(in);
putRequest.setEntity(new InputStreamEntity(in, size));
FutureTask<HttpEntity> futureTask =
new FutureTask<>(() -> executeHttpRequest(putRequest));
new Thread(futureTask).start();
OzoneOutputStream outputStream = new OzoneOutputStream(
new OutputStream() {
@Override
public void write(int b) throws IOException {
out.write(b);
}
@Override
public void close() throws IOException {
try {
out.close();
EntityUtils.consume(futureTask.get());
} catch (ExecutionException | InterruptedException e) {
throw new IOException(e);
}
}
});
return outputStream;
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
@Override
public OzoneInputStream getKey(
String volumeName, String bucketName, String keyName)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
try {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(keyName);
URIBuilder builder = new URIBuilder(ozoneRestUri);
builder.setPath(PATH_SEPARATOR + volumeName +
PATH_SEPARATOR + bucketName +
PATH_SEPARATOR + keyName);
HttpGet getRequest = new HttpGet(builder.build());
addOzoneHeaders(getRequest);
HttpEntity entity = executeHttpRequest(getRequest);
PipedInputStream in = new PipedInputStream();
OutputStream out = new PipedOutputStream(in);
FutureTask<Void> futureTask =
new FutureTask<>(() -> {
entity.writeTo(out);
out.close();
return null;
});
new Thread(futureTask).start();
OzoneInputStream inputStream = new OzoneInputStream(
new InputStream() {
@Override
public int read() throws IOException {
return in.read();
}
@Override
public void close() throws IOException {
in.close();
EntityUtils.consume(entity);
}
});
return inputStream;
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
@Override
public void deleteKey(String volumeName, String bucketName, String keyName)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
try {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(keyName);
URIBuilder builder = new URIBuilder(ozoneRestUri);
builder.setPath(PATH_SEPARATOR + volumeName +
PATH_SEPARATOR + bucketName + PATH_SEPARATOR + keyName);
HttpDelete httpDelete = new HttpDelete(builder.build());
addOzoneHeaders(httpDelete);
EntityUtils.consume(executeHttpRequest(httpDelete));
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
@Override
@ -216,10 +656,113 @@ public List<OzoneKey> listKeys(String volumeName, String bucketName,
public OzoneKey getKeyDetails(
String volumeName, String bucketName, String keyName)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
try {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(keyName);
URIBuilder builder = new URIBuilder(ozoneRestUri);
builder.setPath(PATH_SEPARATOR + volumeName +
PATH_SEPARATOR + bucketName + PATH_SEPARATOR + keyName);
builder.setParameter(Header.OZONE_INFO_QUERY_TAG,
Header.OZONE_INFO_QUERY_KEY);
HttpGet httpGet = new HttpGet(builder.build());
addOzoneHeaders(httpGet);
HttpEntity response = executeHttpRequest(httpGet);
KeyInfo keyInfo =
KeyInfo.parse(EntityUtils.toString(response));
OzoneKey key = new OzoneKey(volumeName,
bucketName,
keyInfo.getKeyName(),
keyInfo.getSize(),
OzoneClientUtils.formatDateTime(keyInfo.getCreatedOn()),
OzoneClientUtils.formatDateTime(keyInfo.getModifiedOn()));
EntityUtils.consume(response);
return key;
} catch (URISyntaxException | ParseException e) {
throw new IOException(e);
}
}
/**
* Adds Ozone headers to http request.
*
* @param httpRequest Http Request
*/
private void addOzoneHeaders(HttpUriRequest httpRequest) {
httpRequest.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
ugi.getUserName());
httpRequest.addHeader(HttpHeaders.DATE,
OzoneClientUtils.formatDateTime(Time.monotonicNow()));
httpRequest.addHeader(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
}
/**
* Sends the HTTP request to the server and returns the response HttpEntity.
* It is the caller's responsibility to consume and close the response
* HttpEntity by calling {@code EntityUtils.consume}.
*
* @param httpUriRequest http request
* @return response entity when the server returns HTTP 200 or 201
* @throws IOException if the request fails or the server reports an error
*/
private HttpEntity executeHttpRequest(HttpUriRequest httpUriRequest)
throws IOException {
HttpResponse response = httpClient.execute(httpUriRequest);
int errorCode = response.getStatusLine().getStatusCode();
HttpEntity entity = response.getEntity();
if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) {
return entity;
}
if (entity != null) {
throw new IOException(
OzoneException.parse(EntityUtils.toString(entity)));
} else {
throw new IOException("Unexpected null in http payload," +
" while processing request");
}
}
/**
* Converts OzoneConsts.Versioning to boolean.
*
* @param version bucket versioning value
* @return corresponding boolean value
*/
private Boolean getBucketVersioningFlag(
OzoneConsts.Versioning version) {
if(version != null) {
switch(version) {
case ENABLED:
return true;
case NOT_DEFINED:
case DISABLED:
default:
return false;
}
}
return false;
}
/**
* Converts the bucket versioning flag into OzoneConsts.Versioning.
*
* @param flag versioning flag
* @return corresponding OzoneConsts.Versioning
*/
private OzoneConsts.Versioning getBucketVersioning(Boolean flag) {
if(flag != null) {
if(flag) {
return OzoneConsts.Versioning.ENABLED;
} else {
return OzoneConsts.Versioning.DISABLED;
}
}
return OzoneConsts.Versioning.NOT_DEFINED;
}
@Override
public void close() throws IOException {
httpClient.close();
}
}
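Putting the pieces together, a hedged end-to-end sketch that exercises only the volume and bucket operations implemented above; the host name is a placeholder, and in practice the client would usually be obtained through OzoneClientFactory as the new test at the end of this patch does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.rest.RestClient;

public class RestClientUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("ozone.rest.servers", "dn1.example.com"); // placeholder host
    RestClient client = new RestClient(conf);
    try {
      client.createVolume("vol1");
      BucketArgs.Builder bucketArgs = BucketArgs.newBuilder();
      bucketArgs.setStorageType(StorageType.SSD);
      client.createBucket("vol1", "bucket1", bucketArgs.build());

      OzoneVolume volume = client.getVolumeDetails("vol1");
      OzoneBucket bucket = client.getBucketDetails("vol1", "bucket1");
      System.out.println(volume.getName() + "/" + bucket.getName());

      client.deleteBucket("vol1", "bucket1");
      client.deleteVolume("vol1");
    } finally {
      client.close();
    }
  }
}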

View File

@ -0,0 +1,22 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest.exceptions;
/**
* This package contains exceptions used by the ozone rest client.
*/

View File

@ -40,9 +40,10 @@ public final class Header {
public static final String OZONE_V1_VERSION_HEADER ="v1";
public static final String OZONE_LIST_QUERY_SERVICE = "service";
public static final String OZONE_LIST_QUERY_VOLUME = "volume";
public static final String OZONE_LIST_QUERY_BUCKET = "bucket";
public static final String OZONE_LIST_QUERY_KEY = "key";
public static final String OZONE_INFO_QUERY_VOLUME = "volume";
public static final String OZONE_INFO_QUERY_BUCKET = "bucket";
public static final String OZONE_INFO_QUERY_KEY = "key";
public static final String OZONE_REQUEST_ID = "x-ozone-request-id";
public static final String OZONE_SERVER_NAME = "x-ozone-server-name";
@ -56,7 +57,7 @@ public final class Header {
public static final String OZONE_ACL_ADD = "ADD";
public static final String OZONE_ACL_REMOVE = "REMOVE";
public static final String OZONE_LIST_QUERY_TAG ="info";
public static final String OZONE_INFO_QUERY_TAG ="info";
public static final String OZONE_QUOTA_QUERY_TAG ="quota";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String OZONE_LIST_QUERY_PREFIX="prefix";
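The rename from OZONE_LIST_QUERY_* to OZONE_INFO_QUERY_* changes only the constant names, not the wire values ("info", "volume", "bucket", "key"). A small sketch of how the tag lands on a request URL, mirroring RestClient.getVolumeDetails above; host and port are placeholders.

import java.net.URI;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.http.client.utils.URIBuilder;

public class InfoQueryTagSketch {
  public static void main(String[] args) throws Exception {
    // Builds something like http://dn1.example.com:9864/vol1?info=volume
    URI uri = new URIBuilder()
        .setScheme("http")
        .setHost("dn1.example.com") // placeholder
        .setPort(9864)
        .setPath("/vol1")
        .setParameter(Header.OZONE_INFO_QUERY_TAG, Header.OZONE_INFO_QUERY_VOLUME)
        .build();
    System.out.println(uri);
  }
}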

View File

@ -0,0 +1,230 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest.response;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConsts;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
/**
* BucketInfo class is used for parsing the JSON response
* when a BucketInfo call is made.
*/
public class BucketInfo implements Comparable<BucketInfo> {
private static final ObjectReader READER =
new ObjectMapper().readerFor(BucketInfo.class);
private String volumeName;
private String bucketName;
private String createdOn;
private List<OzoneAcl> acls;
private OzoneConsts.Versioning versioning;
private StorageType storageType;
/**
* Constructor for BucketInfo.
*
* @param volumeName
* @param bucketName
*/
public BucketInfo(String volumeName, String bucketName) {
this.volumeName = volumeName;
this.bucketName = bucketName;
}
/**
* Default constructor for BucketInfo.
*/
public BucketInfo() {
acls = new LinkedList<>();
}
/**
* Parse a JSON string into BucketInfo Object.
*
* @param jsonString Json String
* @return BucketInfo
* @throws IOException
*/
public static BucketInfo parse(String jsonString) throws IOException {
return READER.readValue(jsonString);
}
/**
* Returns a List of ACLs set on the Bucket.
*
* @return List of Acl
*/
public List<OzoneAcl> getAcls() {
return acls;
}
/**
* Sets ACLs.
*
* @param acls Acl list
*/
public void setAcls(List<OzoneAcl> acls) {
this.acls = acls;
}
/**
* Returns Storage Type info.
*
* @return Storage Type of the bucket
*/
public StorageType getStorageType() {
return storageType;
}
/**
* Sets the Storage Type.
*
* @param storageType Storage Type
*/
public void setStorageType(StorageType storageType) {
this.storageType = storageType;
}
/**
* Returns versioning.
*
* @return versioning Enum
*/
public OzoneConsts.Versioning getVersioning() {
return versioning;
}
/**
* Sets Versioning.
*
* @param versioning
*/
public void setVersioning(OzoneConsts.Versioning versioning) {
this.versioning = versioning;
}
/**
* Gets bucket Name.
*
* @return String
*/
public String getBucketName() {
return bucketName;
}
/**
* Sets bucket Name.
*
* @param bucketName Name of the bucket
*/
public void setBucketName(String bucketName) {
this.bucketName = bucketName;
}
/**
* Sets creation time of the bucket.
*
* @param creationTime Date String
*/
public void setCreatedOn(String creationTime) {
this.createdOn = creationTime;
}
/**
* Returns creation time.
*
* @return creation time of bucket.
*/
public String getCreatedOn() {
return createdOn;
}
/**
* Returns Volume Name.
*
* @return String volume name
*/
public String getVolumeName() {
return volumeName;
}
/**
* Sets the Volume Name of bucket.
*
* @param volumeName volumeName
*/
public void setVolumeName(String volumeName) {
this.volumeName = volumeName;
}
/**
* Compares this object with the specified object for order. Returns a
* negative integer, zero, or a positive integer as this object is less
* than, equal to, or greater than the specified object.
*
* Please note: BucketInfo comparison functions are used only within the
* context of a volume, hence the volume name is purposely ignored in the
* compareTo, equals and hashCode functions of this class.
*/
@Override
public int compareTo(BucketInfo o) {
Preconditions.checkState(o.getVolumeName().equals(this.getVolumeName()));
return this.bucketName.compareTo(o.getBucketName());
}
/**
* Checks if two bucketInfo's are equal.
* @param o Object BucketInfo
* @return True or False
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof BucketInfo)) {
return false;
}
BucketInfo that = (BucketInfo) o;
Preconditions.checkState(that.getVolumeName().equals(this.getVolumeName()));
return bucketName.equals(that.bucketName);
}
/**
* Hash Code for this object.
* @return int
*/
@Override
public int hashCode() {
return bucketName.hashCode();
}
}
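Because BucketInfo is bound with a plain Jackson ObjectReader, the handler's JSON fields are expected to match the bean property names above. A hedged parsing sketch; the sample payload is illustrative only (ACLs omitted), not the exact output of the Ozone REST handler.

import org.apache.hadoop.ozone.client.rest.response.BucketInfo;

public class BucketInfoParseSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative payload; field names follow the setters of BucketInfo.
    String json = "{\"volumeName\":\"vol1\",\"bucketName\":\"bucket1\","
        + "\"createdOn\":\"Wed, 08 Nov 2017 15:31:06 GMT\","
        + "\"storageType\":\"DISK\",\"versioning\":\"DISABLED\"}";
    BucketInfo info = BucketInfo.parse(json);
    System.out.println(info.getBucketName() + " on " + info.getVolumeName()
        + " (" + info.getStorageType() + ", " + info.getVersioning() + ")");
  }
}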

View File

@ -0,0 +1,218 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest.response;
import java.io.IOException;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
/**
* KeyInfo class is used for parsing the JSON response
* when a KeyInfo call is made.
*/
public class KeyInfo implements Comparable<KeyInfo> {
private static final ObjectReader READER =
new ObjectMapper().readerFor(KeyInfo.class);
private long version;
private String md5hash;
private String createdOn;
private String modifiedOn;
private long size;
private String keyName;
/**
* When this key was created.
*
* @return Date String
*/
public String getCreatedOn() {
return createdOn;
}
/**
* When this key was modified.
*
* @return Date String
*/
public String getModifiedOn() {
return modifiedOn;
}
/**
* When this key was created.
*
* @param createdOn Date String
*/
public void setCreatedOn(String createdOn) {
this.createdOn = createdOn;
}
/**
* When this key was modified.
*
* @param modifiedOn Date String
*/
public void setModifiedOn(String modifiedOn) {
this.modifiedOn = modifiedOn;
}
/**
* Gets the Key name of this object.
*
* @return String
*/
public String getKeyName() {
return keyName;
}
/**
* Sets the Key name of this object.
*
* @param keyName String
*/
public void setKeyName(String keyName) {
this.keyName = keyName;
}
/**
* Returns the MD5 Hash for the data of this key.
*
* @return String MD5
*/
public String getMd5hash() {
return md5hash;
}
/**
* Sets the MD5 value of this key.
*
* @param md5hash Md5 of this file
*/
public void setMd5hash(String md5hash) {
this.md5hash = md5hash;
}
/**
* Number of bytes stored in the data part of this key.
*
* @return long size of the data file
*/
public long getSize() {
return size;
}
/**
* Sets the size of the data part of this key.
*
* @param size Size in long
*/
public void setSize(long size) {
this.size = size;
}
/**
* Version of this key.
*
* @return returns the version of this key.
*/
public long getVersion() {
return version;
}
/**
* Sets the version of this key.
*
* @param version - Version String
*/
public void setVersion(long version) {
this.version = version;
}
/**
* Compares this object with the specified object for order. Returns a
* negative integer, zero, or a positive integer as this object is less
* than, equal to, or greater than the specified object.
*
* @param o the object to be compared.
* @return a negative integer, zero, or a positive integer as this object
* is less than, equal to, or greater than the specified object.
* @throws NullPointerException if the specified object is null
* @throws ClassCastException if the specified object's type prevents it
* from being compared to this object.
*/
@Override
public int compareTo(KeyInfo o) {
if (this.keyName.compareTo(o.getKeyName()) != 0) {
return this.keyName.compareTo(o.getKeyName());
}
if (this.getVersion() == o.getVersion()) {
return 0;
}
if (this.getVersion() < o.getVersion()) {
return -1;
}
return 1;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
KeyInfo keyInfo = (KeyInfo) o;
return new EqualsBuilder()
.append(version, keyInfo.version)
.append(keyName, keyInfo.keyName)
.isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder(17, 37)
.append(version)
.append(keyName)
.toHashCode();
}
/**
* Parse a string to return KeyInfo Object.
*
* @param jsonString Json String
* @return keyInfo
* @throws IOException
*/
public static KeyInfo parse(String jsonString) throws IOException {
return READER.readValue(jsonString);
}
}

View File

@ -0,0 +1,215 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest.response;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ozone.client.OzoneQuota;
import java.io.IOException;
/**
* VolumeInfo class is used for parsing the JSON response
* when a VolumeInfo call is made.
*/
@InterfaceAudience.Private
public class VolumeInfo implements Comparable<VolumeInfo> {
private static final ObjectReader READER =
new ObjectMapper().readerFor(VolumeInfo.class);
private VolumeOwner owner;
private OzoneQuota quota;
private String volumeName;
private String createdOn;
private String createdBy;
/**
* Constructor for VolumeInfo.
*
* @param volumeName - Name of the Volume
* @param createdOn - Date String
* @param createdBy - Person who created it
*/
public VolumeInfo(String volumeName, String createdOn,
String createdBy) {
this.volumeName = volumeName;
this.createdOn = createdOn;
this.createdBy = createdBy;
}
/**
* Constructor for VolumeInfo.
*/
public VolumeInfo() {
}
/**
* gets the volume name.
*
* @return Volume Name
*/
public String getVolumeName() {
return volumeName;
}
/**
* Sets the volume name.
*
* @param volumeName Volume Name
*/
public void setVolumeName(String volumeName) {
this.volumeName = volumeName;
}
/**
* Returns the name of the person who created this volume.
*
* @return Name of Admin who created this
*/
public String getCreatedBy() {
return createdBy;
}
/**
* Sets the user name of the person who created this volume.
*
* @param createdBy UserName
*/
public void setCreatedBy(String createdBy) {
this.createdBy = createdBy;
}
/**
* Gets the date on which this volume was created.
*
* @return Date String
*/
public String getCreatedOn() {
return createdOn;
}
/**
* Sets the date string.
*
* @param createdOn Date String
*/
public void setCreatedOn(String createdOn) {
this.createdOn = createdOn;
}
/**
* Returns the owner info.
*
* @return OwnerInfo
*/
public VolumeOwner getOwner() {
return owner;
}
/**
* Sets the owner.
*
* @param owner OwnerInfo
*/
public void setOwner(VolumeOwner owner) {
this.owner = owner;
}
/**
* Returns the quota information on a volume.
*
* @return Quota
*/
public OzoneQuota getQuota() {
return quota;
}
/**
* Sets the quota info.
*
* @param quota Quota Info
*/
public void setQuota(OzoneQuota quota) {
this.quota = quota;
}
/**
* Comparable Interface.
* @param o VolumeInfo Object.
* @return Result of comparison
*/
@Override
public int compareTo(VolumeInfo o) {
return this.volumeName.compareTo(o.getVolumeName());
}
/**
* Returns VolumeInfo class from json string.
*
* @param data Json String
*
* @return VolumeInfo
*
* @throws IOException
*/
public static VolumeInfo parse(String data) throws IOException {
return READER.readValue(data);
}
/**
* Indicates whether some other object is "equal to" this one.
*
* @param obj the reference object with which to compare.
*
* @return {@code true} if this object is the same as the obj
* argument; {@code false} otherwise.
*/
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
VolumeInfo otherInfo = (VolumeInfo) obj;
return otherInfo.getVolumeName().equals(this.getVolumeName());
}
/**
* Returns a hash code value for the object. This method is
* supported for the benefit of hash tables such as those provided by
* HashMap.
* @return a hash code value for this object.
*
* @see Object#equals(Object)
* @see System#identityHashCode
*/
@Override
public int hashCode() {
return getVolumeName().hashCode();
}
}

View File

@ -0,0 +1,60 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest.response;
import com.fasterxml.jackson.annotation.JsonInclude;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Volume Owner represents the owner of a volume.
*
* This is a class instead of a string since we might need to extend this class
* to support other forms of authentication.
*/
@InterfaceAudience.Private
public class VolumeOwner {
@JsonInclude(JsonInclude.Include.NON_NULL)
private String name;
/**
* Constructor for VolumeOwner.
*
* @param name name of the User
*/
public VolumeOwner(String name) {
this.name = name;
}
/**
* Constructs Volume Owner.
*/
public VolumeOwner() {
name = null;
}
/**
* Returns the user name.
*
* @return Name
*/
public String getName() {
return name;
}
}

View File

@ -0,0 +1,24 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.hadoop.ozone.client.rest.response;
/**
* This package contains response classes for the ozone rest client library.
*/

View File

@ -594,8 +594,8 @@ public OzoneKey getKeyInfo(String keyName) throws OzoneException {
builder
.setPath("/" + getVolume().getVolumeName() + "/" + getBucketName()
+ "/" + keyName)
.setParameter(Header.OZONE_LIST_QUERY_TAG,
Header.OZONE_LIST_QUERY_KEY)
.setParameter(Header.OZONE_INFO_QUERY_TAG,
Header.OZONE_INFO_QUERY_KEY)
.build();
getRequest = client.getHttpGet(builder.toString());

View File

@ -187,8 +187,8 @@ public OzoneVolume getVolume(String volumeName) throws OzoneException {
OzoneUtils.verifyResourceName(volumeName);
URIBuilder builder = new URIBuilder(endPointURI);
builder.setPath("/" + volumeName)
.setParameter(Header.OZONE_LIST_QUERY_TAG,
Header.OZONE_LIST_QUERY_VOLUME)
.setParameter(Header.OZONE_INFO_QUERY_TAG,
Header.OZONE_INFO_QUERY_VOLUME)
.build();
httpGet = getHttpGet(builder.toString());

View File

@ -353,8 +353,8 @@ public OzoneBucket getBucket(String bucketName) throws OzoneException {
OzoneUtils.verifyResourceName(bucketName);
URIBuilder builder = new URIBuilder(getClient().getEndPointURI());
builder.setPath("/" + getVolumeName() + "/" + bucketName)
.setParameter(Header.OZONE_LIST_QUERY_TAG,
Header.OZONE_LIST_QUERY_BUCKET).build();
.setParameter(Header.OZONE_INFO_QUERY_TAG,
Header.OZONE_INFO_QUERY_BUCKET).build();
getRequest = client.getHttpGet(builder.toString());
return executeInfoBucket(getRequest, httpClient);

View File

@ -180,10 +180,10 @@ public Response listBucket(String volume, String bucket, final String info,
public Response doProcess(BucketArgs args)
throws OzoneException, IOException {
switch (info) {
case Header.OZONE_LIST_QUERY_KEY:
case Header.OZONE_INFO_QUERY_KEY:
ListArgs listArgs = new ListArgs(args, prefix, maxKeys, startPage);
return getBucketKeysList(listArgs);
case Header.OZONE_LIST_QUERY_BUCKET:
case Header.OZONE_INFO_QUERY_BUCKET:
return getBucketInfoResponse(args);
default:
OzoneException ozException =

View File

@ -86,7 +86,7 @@ public Response doProcess(KeyArgs args, InputStream input,
throws IOException, OzoneException, NoSuchAlgorithmException {
if (info == null) {
return getKey(args);
} else if (info.equals(Header.OZONE_LIST_QUERY_KEY)) {
} else if (info.equals(Header.OZONE_INFO_QUERY_KEY)) {
return getKeyInfo(args);
}

View File

@ -228,10 +228,10 @@ public Response doProcess(VolumeArgs args)
throws IOException, OzoneException {
switch (info) {
case Header.OZONE_LIST_QUERY_BUCKET:
case Header.OZONE_INFO_QUERY_BUCKET:
MDC.put(OZONE_FUNCTION, "ListBucket");
return getBucketsInVolume(args, prefix, maxKeys, prevKey);
case Header.OZONE_LIST_QUERY_VOLUME:
case Header.OZONE_INFO_QUERY_VOLUME:
MDC.put(OZONE_FUNCTION, "InfoVolume");
assertNoListParamPresent(uriInfo, args);
return getVolumeInfoResponse(args); // Return volume info

View File

@ -166,8 +166,8 @@ Response deleteBucket(@PathParam("volume") String volume,
true, paramType = "header")})
Response listBucket(@PathParam("volume") String volume,
@PathParam("bucket") String bucket,
@DefaultValue(Header.OZONE_LIST_QUERY_KEY)
@QueryParam(Header.OZONE_LIST_QUERY_TAG)
@DefaultValue(Header.OZONE_INFO_QUERY_KEY)
@QueryParam(Header.OZONE_INFO_QUERY_TAG)
String info,
@QueryParam(Header.OZONE_LIST_QUERY_PREFIX)
String prefix,

View File

@ -110,7 +110,7 @@ Response putKey(@PathParam("volume") String volume,
true, paramType = "header")})
Response getKey(@PathParam("volume") String volume,
@PathParam("bucket") String bucket, @PathParam("keys") String keys,
@QueryParam(Header.OZONE_LIST_QUERY_TAG) String info,
@QueryParam(Header.OZONE_INFO_QUERY_TAG) String info,
@Context Request req, @Context UriInfo uriInfo,
@Context HttpHeaders headers) throws OzoneException;

View File

@ -169,8 +169,8 @@ Response deleteVolume(@PathParam("volume") String volume,
@ApiImplicitParam(name = "Authorization", example = "OZONE", required =
true, paramType = "header")})
Response getVolumeInfo(@PathParam("volume") String volume,
@DefaultValue(Header.OZONE_LIST_QUERY_BUCKET)
@QueryParam(Header.OZONE_LIST_QUERY_TAG) String info,
@DefaultValue(Header.OZONE_INFO_QUERY_BUCKET)
@QueryParam(Header.OZONE_INFO_QUERY_TAG) String info,
@QueryParam(Header.OZONE_LIST_QUERY_PREFIX) String prefix,
@DefaultValue(Header.OZONE_DEFAULT_LIST_SIZE)
@QueryParam(Header.OZONE_LIST_QUERY_MAXKEYS) int keys,

View File

@ -25,8 +25,10 @@
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.interfaces.UserAuth;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.security.UserGroupInformation;
import javax.ws.rs.core.HttpHeaders;
import java.io.IOException;
import java.util.List;
/**
@ -104,11 +106,18 @@ public String getUser(UserArgs userArgs) throws OzoneException {
public boolean isAdmin(UserArgs userArgs) throws OzoneException {
assert userArgs != null : "userArgs cannot be null";
String user = getUser(userArgs);
String user;
String currentUser;
try {
user = getUser(userArgs);
currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
} catch (IOException e) {
throw ErrorTable.newError(ErrorTable.BAD_AUTHORIZATION, userArgs);
}
return
(user.compareToIgnoreCase(OzoneConsts.OZONE_SIMPLE_ROOT_USER) == 0) ||
(user.compareToIgnoreCase(OzoneConsts.OZONE_SIMPLE_HDFS_USER) == 0);
(user.compareToIgnoreCase(OzoneConsts.OZONE_SIMPLE_HDFS_USER) == 0)
|| (user.compareToIgnoreCase(currentUser) == 0);
}
/**

View File

@ -1137,4 +1137,22 @@
percentage in float notation (X.Yf), with 1.0f meaning 100%.
</description>
</property>
<property>
<name>ozone.rest.client.http.connection.max</name>
<value>100</value>
<tag>OZONE, CLIENT</tag>
<description>
This defines the overall connection limit for the connection pool used in
RestClient.
</description>
</property>
<property>
<name>ozone.rest.client.http.connection.per-route.max</name>
<value>20</value>
<tag>OZONE, CLIENT</tag>
<description>
This defines the connection limit per HTTP route/host. The total number of
connections is capped by the ozone.rest.client.http.connection.max property.
</description>
</property>
</configuration>

View File

@ -0,0 +1,409 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest;
import org.apache.hadoop.conf.OzoneConfiguration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneQuota;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.ReplicationFactor;
import org.apache.hadoop.ozone.client.ReplicationType;
import org.apache.hadoop.ozone.client.VolumeArgs;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
/**
* This class tests all the public facing APIs of the Ozone REST client.
*/
public class TestOzoneRestClient {
@Rule
public ExpectedException thrown = ExpectedException.none();
private static MiniOzoneCluster cluster = null;
private static OzoneClient ozClient = null;
private static ObjectStore store = null;
/**
* Create a MiniOzoneCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "distributed"
*
* @throws IOException
*/
@BeforeClass
public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
cluster = new MiniOzoneCluster.Builder(conf)
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
DataNode datanode = cluster.getDataNodes().get(0);
conf.set(OzoneConfigKeys.OZONE_CLIENT_PROTOCOL,
"org.apache.hadoop.ozone.client.rest.RestClient");
conf.set(OzoneConfigKeys.OZONE_REST_SERVERS,
datanode.getDatanodeHostname());
conf.set(OzoneConfigKeys.OZONE_REST_CLIENT_PORT,
Integer.toString(datanode.getInfoPort()));
OzoneClientFactory.setConfiguration(conf);
ozClient = OzoneClientFactory.getClient();
store = ozClient.getObjectStore();
}
@Test
public void testCreateVolume()
throws IOException, OzoneException {
String volumeName = UUID.randomUUID().toString();
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
Assert.assertEquals(volumeName, volume.getName());
}
@Test
public void testCreateVolumeWithOwner()
throws IOException, OzoneException {
String volumeName = UUID.randomUUID().toString();
VolumeArgs.Builder argsBuilder = VolumeArgs.newBuilder();
argsBuilder.setOwner("test");
store.createVolume(volumeName, argsBuilder.build());
OzoneVolume volume = store.getVolume(volumeName);
Assert.assertEquals(volumeName, volume.getName());
Assert.assertEquals("test", volume.getOwner());
}
@Test
public void testCreateVolumeWithQuota()
throws IOException, OzoneException {
String volumeName = UUID.randomUUID().toString();
VolumeArgs.Builder argsBuilder = VolumeArgs.newBuilder();
argsBuilder.setOwner("test").setQuota("1000000000 BYTES");
store.createVolume(volumeName, argsBuilder.build());
OzoneVolume volume = store.getVolume(volumeName);
Assert.assertEquals(volumeName, volume.getName());
Assert.assertEquals("test", volume.getOwner());
Assert.assertEquals(1000000000L, volume.getQuota());
}
@Test
public void testVolumeAlreadyExist()
throws IOException, OzoneException {
String volumeName = UUID.randomUUID().toString();
store.createVolume(volumeName);
try {
store.createVolume(volumeName);
Assert.fail("Creating a volume that already exists should fail.");
} catch (IOException ex) {
Assert.assertEquals(
"Volume creation failed, error:VOLUME_ALREADY_EXISTS",
ex.getCause().getMessage());
}
}
@Test
public void testSetVolumeOwner()
throws IOException, OzoneException {
String volumeName = UUID.randomUUID().toString();
store.createVolume(volumeName);
store.getVolume(volumeName).setOwner("test");
OzoneVolume volume = store.getVolume(volumeName);
Assert.assertEquals("test", volume.getOwner());
}
@Test
public void testSetVolumeQuota()
throws IOException, OzoneException {
String volumeName = UUID.randomUUID().toString();
store.createVolume(volumeName);
store.getVolume(volumeName).setQuota(
OzoneQuota.parseQuota("100000000 BYTES"));
OzoneVolume volume = store.getVolume(volumeName);
Assert.assertEquals(100000000L, volume.getQuota());
}
@Test
public void testDeleteVolume()
throws IOException, OzoneException {
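// The ExpectedException rule verifies that looking up the volume after
// deletion fails with an "Info Volume failed" error.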
thrown.expectMessage("Info Volume failed, error");
String volumeName = UUID.randomUUID().toString();
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
Assert.assertNotNull(volume);
store.deleteVolume(volumeName);
store.getVolume(volumeName);
}
@Test
public void testCreateBucket()
throws IOException, OzoneException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
Assert.assertEquals(bucketName, bucket.getName());
}
@Test
public void testCreateBucketWithVersioning()
throws IOException, OzoneException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
BucketArgs.Builder builder = BucketArgs.newBuilder();
builder.setVersioning(true);
volume.createBucket(bucketName, builder.build());
OzoneBucket bucket = volume.getBucket(bucketName);
Assert.assertEquals(bucketName, bucket.getName());
Assert.assertTrue(bucket.getVersioning());
}
@Test
public void testCreateBucketWithStorageType()
throws IOException, OzoneException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
BucketArgs.Builder builder = BucketArgs.newBuilder();
builder.setStorageType(StorageType.SSD);
volume.createBucket(bucketName, builder.build());
OzoneBucket bucket = volume.getBucket(bucketName);
Assert.assertEquals(bucketName, bucket.getName());
Assert.assertEquals(StorageType.SSD, bucket.getStorageType());
}
@Test
public void testCreateBucketWithAcls()
throws IOException, OzoneException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
OzoneAcl userAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, "test",
OzoneAcl.OzoneACLRights.READ_WRITE);
List<OzoneAcl> acls = new ArrayList<>();
acls.add(userAcl);
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
BucketArgs.Builder builder = BucketArgs.newBuilder();
builder.setAcls(acls);
volume.createBucket(bucketName, builder.build());
OzoneBucket bucket = volume.getBucket(bucketName);
Assert.assertEquals(bucketName, bucket.getName());
Assert.assertTrue(bucket.getAcls().contains(userAcl));
}
@Test
public void testCreateBucketWithAllArgument()
throws IOException, OzoneException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
OzoneAcl userAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, "test",
OzoneAcl.OzoneACLRights.READ_WRITE);
List<OzoneAcl> acls = new ArrayList<>();
acls.add(userAcl);
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
BucketArgs.Builder builder = BucketArgs.newBuilder();
builder.setVersioning(true)
.setStorageType(StorageType.SSD)
.setAcls(acls);
volume.createBucket(bucketName, builder.build());
OzoneBucket bucket = volume.getBucket(bucketName);
Assert.assertEquals(bucketName, bucket.getName());
Assert.assertTrue(bucket.getVersioning());
Assert.assertEquals(StorageType.SSD, bucket.getStorageType());
Assert.assertTrue(bucket.getAcls().contains(userAcl));
}
@Test
public void testAddBucketAcl()
throws IOException, OzoneException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
List<OzoneAcl> acls = new ArrayList<>();
acls.add(new OzoneAcl(
OzoneAcl.OzoneACLType.USER, "test",
OzoneAcl.OzoneACLRights.READ_WRITE));
OzoneBucket bucket = volume.getBucket(bucketName);
bucket.addAcls(acls);
OzoneBucket newBucket = volume.getBucket(bucketName);
Assert.assertEquals(bucketName, newBucket.getName());
Assert.assertTrue(newBucket.getAcls().contains(acls.get(0)));
}
@Test
public void testRemoveBucketAcl()
throws IOException, OzoneException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
OzoneAcl userAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, "test",
OzoneAcl.OzoneACLRights.READ_WRITE);
List<OzoneAcl> acls = new ArrayList<>();
acls.add(userAcl);
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
BucketArgs.Builder builder = BucketArgs.newBuilder();
builder.setAcls(acls);
volume.createBucket(bucketName, builder.build());
OzoneBucket bucket = volume.getBucket(bucketName);
bucket.removeAcls(acls);
OzoneBucket newBucket = volume.getBucket(bucketName);
Assert.assertEquals(bucketName, newBucket.getName());
Assert.assertFalse(newBucket.getAcls().contains(acls.get(0)));
}
@Test
public void testSetBucketVersioning()
throws IOException, OzoneException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
bucket.setVersioning(true);
OzoneBucket newBucket = volume.getBucket(bucketName);
Assert.assertEquals(bucketName, newBucket.getName());
Assert.assertTrue(newBucket.getVersioning());
}
@Test
public void testSetBucketStorageType()
throws IOException, OzoneException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
bucket.setStorageType(StorageType.SSD);
OzoneBucket newBucket = volume.getBucket(bucketName);
Assert.assertEquals(bucketName, newBucket.getName());
Assert.assertEquals(StorageType.SSD, newBucket.getStorageType());
}
@Test
public void testDeleteBucket()
throws IOException, OzoneException {
thrown.expectMessage("Info Bucket failed, error");
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
Assert.assertNotNull(bucket);
volume.deleteBucket(bucketName);
volume.getBucket(bucketName);
}
@Test
public void testPutKey()
throws IOException, OzoneException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String value = "sample value";
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
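// Write ten keys with random names and verify that each key's content
// can be read back intact through the REST client.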
for (int i = 0; i < 10; i++) {
String keyName = UUID.randomUUID().toString();
OzoneOutputStream out = bucket.createKey(keyName,
value.getBytes().length, ReplicationType.STAND_ALONE,
ReplicationFactor.ONE);
out.write(value.getBytes());
out.close();
OzoneKey key = bucket.getKey(keyName);
Assert.assertEquals(keyName, key.getName());
OzoneInputStream is = bucket.readKey(keyName);
byte[] fileContent = new byte[value.getBytes().length];
is.read(fileContent);
is.close();
Assert.assertEquals(value, new String(fileContent));
}
}
@Test
public void testDeleteKey()
throws IOException, OzoneException {
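// The ExpectedException rule verifies that looking up the key after
// deletion fails with a "Lookup key failed" error.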
thrown.expectMessage("Lookup key failed, error");
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String keyName = UUID.randomUUID().toString();
String value = "sample value";
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
OzoneOutputStream out = bucket.createKey(keyName,
value.getBytes().length, ReplicationType.STAND_ALONE,
ReplicationFactor.ONE);
out.write(value.getBytes());
out.close();
OzoneKey key = bucket.getKey(keyName);
Assert.assertEquals(keyName, key.getName());
bucket.deleteKey(keyName);
bucket.getKey(keyName);
}
/**
* Close OzoneClient and shutdown MiniOzoneCluster.
*/
@AfterClass
public static void shutdown() throws IOException {
if (ozClient != null) {
ozClient.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}

View File

@ -0,0 +1,23 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest;
/**
* This package contains test classes for the Ozone REST client library.
*/