HDDS-738. Removing REST protocol support from OzoneClient.

Contributed by Elek, Marton.
Anu Engineer 2019-08-28 09:53:31 -07:00
parent c0499bd704
commit dc72782008
150 changed files with 563 additions and 18724 deletions

View File

@@ -125,9 +125,6 @@ public final class OzoneConfigKeys {
* */
public static final String OZONE_ADMINISTRATORS_WILDCARD = "*";
public static final String OZONE_CLIENT_PROTOCOL =
"ozone.client.protocol";
public static final String OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE =
"ozone.client.stream.buffer.flush.size";

View File

@@ -461,19 +461,6 @@
there is no wait.
</description>
</property>
<property>
<name>ozone.client.protocol</name>
<value>org.apache.hadoop.ozone.client.rpc.RpcClient</value>
<tag>OZONE, CLIENT, MANAGEMENT</tag>
<description>Protocol class to be used by the client to connect to ozone
cluster.
The built-in implementations include:
org.apache.hadoop.ozone.client.rpc.RpcClient for RPC
org.apache.hadoop.ozone.client.rest.RestClient for REST
The default is the RpcClient. Please do not change this unless you have a
very good understanding of what you are doing.
</description>
</property>
<property>
<name>ozone.client.socket.timeout</name>
<value>5000ms</value>
@@ -1261,7 +1248,7 @@
<property>
<name>hdds.datanode.plugins</name>
<value>org.apache.hadoop.ozone.web.OzoneHddsDatanodeService</value>
<value></value>
<description>
Comma-separated list of HDDS datanode plug-ins to be activated when
HDDS service starts as part of datanode.
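With ozone.client.protocol removed, there is no protocol switch left to configure: a stale key in an existing ozone-site.xml is simply ignored and the factory always hands back an RPC-backed client. A minimal sketch of the post-patch behavior (the key and value below are the ones deleted above, set only to show they no longer matter):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

public class StaleProtocolKeyExample {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // After this patch the key is never read, so even the old REST value
    // cannot change the protocol; the factory always builds an RpcClient.
    conf.set("ozone.client.protocol",
        "org.apache.hadoop.ozone.client.rest.RestClient");
    try (OzoneClient client = OzoneClientFactory.getClient(conf)) {
      System.out.println("RPC-backed client: " + client);
    }
  }
}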

View File

@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.client;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
@@ -211,6 +212,7 @@ public class OzoneBucket extends WithMetadata {
*
* @return acls
*/
@JsonIgnore
public List<OzoneAcl> getAcls() throws IOException {
return proxy.getAcl(ozoneObj);
}

View File

@@ -17,38 +17,27 @@
*/
package org.apache.hadoop.ozone.client;
import org.apache.hadoop.ozone.client.rest.OzoneException;
/**
* This exception is thrown by the Ozone Clients.
*/
public class OzoneClientException extends OzoneException {
/**
* Constructor that allows the shortMessage.
*
* @param shortMessage Short Message
*/
public OzoneClientException(String shortMessage) {
super(0, shortMessage, shortMessage);
public class OzoneClientException extends Exception {
public OzoneClientException() {
}
/**
* Constructor that allows a shortMessage and an exception.
*
* @param shortMessage short message
* @param ex exception
*/
public OzoneClientException(String shortMessage, Exception ex) {
super(0, shortMessage, shortMessage, ex);
public OzoneClientException(String s) {
super(s);
}
/**
* Constructor that allows the shortMessage and a longer message.
*
* @param shortMessage Short Message
* @param message long error message
*/
public OzoneClientException(String shortMessage, String message) {
super(0, shortMessage, message);
public OzoneClientException(String s, Throwable throwable) {
super(s, throwable);
}
public OzoneClientException(Throwable throwable) {
super(throwable);
}
public OzoneClientException(String s, Throwable throwable, boolean b,
boolean b1) {
super(s, throwable, b, b1);
}
}
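The rewritten exception is a plain java.lang.Exception: the REST-era httpCode/shortMessage payload is gone, and callers fall back to the standard Throwable accessors. A short usage sketch (the volume name and cause are illustrative):

import org.apache.hadoop.ozone.client.OzoneClientException;

public class ClientExceptionExample {
  static void failLookup(String volume) throws OzoneClientException {
    // Uses the (String, Throwable) constructor added above.
    throw new OzoneClientException("Volume not found: " + volume,
        new IllegalStateException("lookup failed"));
  }

  public static void main(String[] args) {
    try {
      failLookup("vol1");
    } catch (OzoneClientException e) {
      System.err.println(e.getMessage()); // message, no shortMessage anymore
      System.err.println(e.getCause());   // underlying cause, if any
    }
  }
}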

View File

@@ -18,37 +18,22 @@
package org.apache.hadoop.ozone.client;
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.lang.reflect.Proxy;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.client.rest.RestClient;
import org.apache.hadoop.ozone.client.rpc.RpcClient;
import com.google.common.base.Preconditions;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Proxy;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_PROTOCOL;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY;
/**
* Factory class to create different types of OzoneClients.
* Based on <code>ozone.client.protocol</code>, it decides which
* protocol to use for the communication.
* Default value is
* <code>org.apache.hadoop.ozone.client.rpc.RpcClient</code>.<br>
* OzoneClientFactory constructs a proxy using
* {@link OzoneClientInvocationHandler}
* and creates OzoneClient instance with it.
* {@link OzoneClientInvocationHandler} dispatches the call to
* underlying {@link ClientProtocol} implementation.
* Factory class to create OzoneClients.
*/
public final class OzoneClientFactory {
@@ -87,9 +72,7 @@ public final class OzoneClientFactory {
public static OzoneClient getClient(Configuration config)
throws IOException {
Preconditions.checkNotNull(config);
Class<? extends ClientProtocol> clazz = (Class<? extends ClientProtocol>)
config.getClass(OZONE_CLIENT_PROTOCOL, RpcClient.class);
return getClient(getClientProtocol(clazz, config), config);
return getClient(getClientProtocol(config), config);
}
/**
@@ -166,85 +149,7 @@ public final class OzoneClientFactory {
public static OzoneClient getRpcClient(Configuration config)
throws IOException {
Preconditions.checkNotNull(config);
return getClient(getClientProtocol(RpcClient.class, config),
config);
}
/**
* Returns an OzoneClient which will use REST protocol.
*
* @param omHost
* hostname of OzoneManager to connect.
*
* @return OzoneClient
*
* @throws IOException
*/
public static OzoneClient getRestClient(String omHost)
throws IOException {
Configuration config = new OzoneConfiguration();
int port = OmUtils.getOmRestPort(config);
return getRestClient(omHost, port, config);
}
/**
* Returns an OzoneClient which will use REST protocol.
*
* @param omHost
* hostname of OzoneManager to connect.
*
* @param omHttpPort
* HTTP port of OzoneManager.
*
* @return OzoneClient
*
* @throws IOException
*/
public static OzoneClient getRestClient(String omHost, Integer omHttpPort)
throws IOException {
return getRestClient(omHost, omHttpPort, new OzoneConfiguration());
}
/**
* Returns an OzoneClient which will use REST protocol.
*
* @param omHost
* hostname of OzoneManager to connect.
*
* @param omHttpPort
* HTTP port of OzoneManager.
*
* @param config
* Configuration to be used for OzoneClient creation
*
* @return OzoneClient
*
* @throws IOException
*/
public static OzoneClient getRestClient(String omHost, Integer omHttpPort,
Configuration config)
throws IOException {
Preconditions.checkNotNull(omHost);
Preconditions.checkNotNull(omHttpPort);
Preconditions.checkNotNull(config);
config.set(OZONE_OM_HTTP_ADDRESS_KEY, omHost + ":" + omHttpPort);
return getRestClient(config);
}
/**
* Returns an OzoneClient which will use REST protocol.
*
* @param config
* Configuration to be used for OzoneClient creation
*
* @return OzoneClient
*
* @throws IOException
*/
public static OzoneClient getRestClient(Configuration config)
throws IOException {
Preconditions.checkNotNull(config);
return getClient(getClientProtocol(RestClient.class, config),
return getClient(getClientProtocol(config),
config);
}
@@ -270,8 +175,6 @@ public final class OzoneClientFactory {
/**
* Returns an instance of Protocol class.
*
* @param protocolClass
* Class object of the ClientProtocol.
*
* @param config
* Configuration used to initialize ClientProtocol.
@@ -280,23 +183,15 @@
*
* @throws IOException
*/
private static ClientProtocol getClientProtocol(
Class<? extends ClientProtocol> protocolClass, Configuration config)
private static ClientProtocol getClientProtocol(Configuration config)
throws IOException {
try {
LOG.debug("Using {} as client protocol.",
protocolClass.getCanonicalName());
Constructor<? extends ClientProtocol> ctor =
protocolClass.getConstructor(Configuration.class);
return ctor.newInstance(config);
return new RpcClient(config);
} catch (Exception e) {
final String message = "Couldn't create protocol " + protocolClass;
final String message = "Couldn't create RpcClient protocol";
LOG.error(message + " exception: ", e);
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
} else if (e instanceof InvocationTargetException) {
throw new IOException(message,
((InvocationTargetException) e).getTargetException());
} else {
throw new IOException(message, e);
}
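Since every remaining factory entry point resolves to RpcClient, getClient and getRpcClient are now equivalent. A usage sketch, assuming the standard ObjectStore API and a placeholder volume name:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

public class FactoryUsageExample {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Equivalent to getClient(conf) after this patch: both construct an
    // RpcClient and wrap it behind the OzoneClient facade.
    try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
      ObjectStore store = client.getObjectStore();
      store.createVolume("vol1"); // "vol1" is a placeholder volume name
    }
  }
}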

View File

@@ -17,106 +17,15 @@
*/
package org.apache.hadoop.ozone.client;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.client.OzoneQuota;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
import org.apache.hadoop.ozone.client.rest.response.KeyInfo;
import org.apache.hadoop.ozone.client.rest.response.KeyInfoDetails;
import org.apache.hadoop.ozone.client.rest.response.KeyLocation;
import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;
import org.apache.hadoop.ozone.client.rest.response.VolumeOwner;
/** A utility class for OzoneClient. */
public final class OzoneClientUtils {
private OzoneClientUtils() {}
/**
* Returns a BucketInfo object constructed using fields of the input
* OzoneBucket object.
*
* @param bucket OzoneBucket instance from which BucketInfo object needs to
* be created.
* @return BucketInfo instance
*/
public static BucketInfo asBucketInfo(OzoneBucket bucket) throws IOException {
BucketInfo bucketInfo =
new BucketInfo(bucket.getVolumeName(), bucket.getName());
bucketInfo
.setCreatedOn(HddsClientUtils.formatDateTime(bucket.getCreationTime()));
bucketInfo.setStorageType(bucket.getStorageType());
bucketInfo.setVersioning(
OzoneConsts.Versioning.getVersioning(bucket.getVersioning()));
bucketInfo.setEncryptionKeyName(
bucket.getEncryptionKeyName()==null? "N/A" :
bucket.getEncryptionKeyName());
return bucketInfo;
}
/**
* Returns a VolumeInfo object constructed using fields of the input
* OzoneVolume object.
*
* @param volume OzoneVolume instance from which VolumeInfo object needs to
* be created.
* @return VolumeInfo instance
*/
public static VolumeInfo asVolumeInfo(OzoneVolume volume) {
VolumeInfo volumeInfo = new VolumeInfo(volume.getName(),
HddsClientUtils.formatDateTime(volume.getCreationTime()),
volume.getOwner());
volumeInfo.setQuota(OzoneQuota.getOzoneQuota(volume.getQuota()));
volumeInfo.setOwner(new VolumeOwner(volume.getOwner()));
return volumeInfo;
}
/**
* Returns a KeyInfo object constructed using fields of the input
* OzoneKey object.
*
* @param key OzoneKey instance from which KeyInfo object needs to
* be created.
* @return KeyInfo instance
*/
public static KeyInfo asKeyInfo(OzoneKey key) {
KeyInfo keyInfo = new KeyInfo();
keyInfo.setKeyName(key.getName());
keyInfo.setCreatedOn(HddsClientUtils.formatDateTime(key.getCreationTime()));
keyInfo.setModifiedOn(
HddsClientUtils.formatDateTime(key.getModificationTime()));
keyInfo.setSize(key.getDataSize());
return keyInfo;
}
/**
* Returns a KeyInfoDetails object constructed using fields of the input
* OzoneKeyDetails object.
*
* @param key OzoneKeyDetails instance from which KeyInfo object needs to
* be created.
* @return KeyInfoDetails instance
*/
public static KeyInfoDetails asKeyInfoDetails(OzoneKeyDetails key) {
KeyInfoDetails keyInfo = new KeyInfoDetails();
keyInfo.setKeyName(key.getName());
keyInfo.setCreatedOn(HddsClientUtils.formatDateTime(key.getCreationTime()));
keyInfo.setModifiedOn(
HddsClientUtils.formatDateTime(key.getModificationTime()));
keyInfo.setSize(key.getDataSize());
List<KeyLocation> keyLocations = new ArrayList<>();
key.getOzoneKeyLocations().forEach((a) -> keyLocations.add(new KeyLocation(
a.getContainerID(), a.getLocalID(), a.getLength(), a.getOffset())));
keyInfo.setKeyLocation(keyLocations);
keyInfo.setFileEncryptionInfo(key.getFileEncryptionInfo());
return keyInfo;
}
public static RetryPolicy createRetryPolicy(int maxRetryCount,
long retryInterval) {
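The retained createRetryPolicy body falls outside the diff context above. Given the RetryPolicies/TimeUnit imports the file keeps, a fixed-sleep policy is the plausible shape; this is an assumption, not necessarily the exact body:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public final class RetryPolicySketch {
  // Hedged sketch: retry up to maxRetryCount times, sleeping a fixed
  // retryInterval (assumed to be milliseconds) between attempts.
  static RetryPolicy createRetryPolicy(int maxRetryCount, long retryInterval) {
    return RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        maxRetryCount, retryInterval, TimeUnit.MILLISECONDS);
  }
}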

View File

@@ -1,36 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest;
import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
import java.util.List;
import java.util.Random;
/**
* Default selector randomly picks one of the REST servers from the list.
*/
public class DefaultRestServerSelector implements RestServerSelector {
@Override
public ServiceInfo getRestServer(List<ServiceInfo> restServices) {
return restServices.get(
new Random().nextInt(restServices.size()));
}
}

View File

@@ -1,45 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.ExceptionMapper;
import org.slf4j.MDC;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Class that represents various errors returned by the
* Object Layer.
*/
public class OzoneExceptionMapper implements ExceptionMapper<OzoneException> {
private static final Logger LOG =
LoggerFactory.getLogger(OzoneExceptionMapper.class);
@Override
public Response toResponse(OzoneException exception) {
LOG.debug("Returning exception. ex: {}", exception.toJsonString());
MDC.clear();
return Response.status((int)exception.getHttpCode())
.entity(exception.toJsonString()).build();
}
}

View File

@@ -1,40 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest;
import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
import java.util.List;
/**
* The implementor of this interface should select the REST server which will
* be used by the client to connect to the Ozone cluster, given a list of
* REST servers/DataNodes (DataNodes are the ones which host the REST service).
*/
public interface RestServerSelector {
/**
* Returns the REST Service which will be used by the client for connection.
*
* @param restServices list of available REST servers
* @return ServiceInfo
*/
ServiceInfo getRestServer(List<ServiceInfo> restServices);
}
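For reference, implementing the (now deleted) selection hook took a single method; a hedged sketch of a custom strategy that always picks the first advertised server:

import java.util.List;
import org.apache.hadoop.ozone.om.helpers.ServiceInfo;

// Illustrative only: RestServerSelector is removed by this patch.
public class FirstServerSelector implements RestServerSelector {
  @Override
  public ServiceInfo getRestServer(List<ServiceInfo> restServices) {
    return restServices.get(0); // deterministic: always the first server
  }
}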

View File

@@ -1,22 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest.exceptions;
/**
* This package contains ozone rest client libraries.
*/

View File

@@ -1,22 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.ozone.client.rest.headers;
import org.apache.hadoop.classification.InterfaceAudience;

View File

@@ -1,23 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest;
/**
* This package contains Ozone rest client library classes.
*/

View File

@@ -1,267 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
/**
* Class that represents various errors returned by the
* Ozone Layer.
*/
@InterfaceAudience.Private
public class OzoneException extends Exception {
private static final ObjectReader READER =
new ObjectMapper().readerFor(OzoneException.class);
private static final ObjectMapper MAPPER;
static {
MAPPER = new ObjectMapper();
MAPPER.setVisibility(
MAPPER.getSerializationConfig().getDefaultVisibilityChecker()
.withCreatorVisibility(JsonAutoDetect.Visibility.NONE)
.withFieldVisibility(JsonAutoDetect.Visibility.NONE)
.withGetterVisibility(JsonAutoDetect.Visibility.NONE)
.withIsGetterVisibility(JsonAutoDetect.Visibility.NONE)
.withSetterVisibility(JsonAutoDetect.Visibility.NONE));
}
@JsonProperty("httpCode")
private long httpCode;
@JsonProperty("shortMessage")
private String shortMessage;
@JsonProperty("resource")
private String resource;
@JsonProperty("message")
private String message;
@JsonProperty("requestID")
private String requestId;
@JsonProperty("hostName")
private String hostID;
/**
* Constructs a new exception with {@code null} as its detail message. The
* cause is not initialized, and may subsequently be initialized by a call
* to {@link #initCause}.
*
* This constructor is needed by Json Serializer.
*/
public OzoneException() {
}
/**
* Constructor that allows a shortMessage and exception.
*
* @param httpCode Error Code
* @param shortMessage Short Message
* @param ex Exception
*/
public OzoneException(long httpCode, String shortMessage, Exception ex) {
super(ex);
this.message = ex.getMessage();
this.shortMessage = shortMessage;
this.httpCode = httpCode;
}
/**
* Constructor that allows a shortMessage.
*
* @param httpCode Error Code
* @param shortMessage Short Message
*/
public OzoneException(long httpCode, String shortMessage) {
this.shortMessage = shortMessage;
this.httpCode = httpCode;
}
/**
* Constructor that allows a shortMessage and long message.
*
* @param httpCode Error Code
* @param shortMessage Short Message
* @param message long error message
*/
public OzoneException(long httpCode, String shortMessage, String message) {
this.shortMessage = shortMessage;
this.message = message;
this.httpCode = httpCode;
}
/**
* Constructor that allows a shortMessage, a long message and an exception.
*
* @param httpCode Error code
* @param shortMessage Short message
* @param message Long error message
* @param ex Exception
*/
public OzoneException(long httpCode, String shortMessage,
String message, Exception ex) {
super(ex);
this.shortMessage = shortMessage;
this.message = message;
this.httpCode = httpCode;
}
/**
* Returns the Resource that was involved in the error.
*
* @return String
*/
public String getResource() {
return resource;
}
/**
* Sets Resource.
*
* @param resourceName - Name of the Resource
*/
public void setResource(String resourceName) {
this.resource = resourceName;
}
/**
* Gets a detailed message for the error.
*
* @return String
*/
public String getMessage() {
return message;
}
/**
* Sets the error message.
*
* @param longMessage - Long message
*/
public void setMessage(String longMessage) {
this.message = longMessage;
}
/**
* Returns request Id.
*
* @return String
*/
public String getRequestId() {
return requestId;
}
/**
* Sets request ID.
*
* @param ozoneRequestId Request ID generated by the Server
*/
public void setRequestId(String ozoneRequestId) {
this.requestId = ozoneRequestId;
}
/**
* Returns short error string.
*
* @return String
*/
public String getShortMessage() {
return shortMessage;
}
/**
* Sets short error string.
*
* @param shortError Short Error Code
*/
public void setShortMessage(String shortError) {
this.shortMessage = shortError;
}
/**
* Returns hostID.
*
* @return String
*/
public String getHostID() {
return hostID;
}
/**
* Sets host ID.
*
* @param hostName host Name
*/
public void setHostID(String hostName) {
this.hostID = hostName;
}
/**
* Returns http error code.
*
* @return long
*/
public long getHttpCode() {
return httpCode;
}
/**
* Sets http status.
*
* @param httpStatus http error code.
*/
public void setHttpCode(long httpStatus) {
this.httpCode = httpStatus;
}
/**
* Returns a Json String.
*
* @return JSON representation of the Error
*/
public String toJsonString() {
try {
return MAPPER.writeValueAsString(this);
} catch (IOException ex) {
// TODO : Log this error on server side.
}
// TODO : Replace this with a JSON Object -- That represents this error.
return "500 Internal Server Error";
}
/**
* Parses an Exception record.
*
* @param jsonString - Exception in Json format.
*
* @return OzoneException Object
*
* @throws IOException
*/
public static OzoneException parse(String jsonString) throws IOException {
return READER.readValue(jsonString);
}
}
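Before its removal, this was how errors crossed the REST boundary: the exception serialized only its @JsonProperty fields and could be rebuilt from the wire form. A round-trip sketch using the constructors shown above (values are illustrative):

// Illustrative only: OzoneException is removed by this patch.
public class OzoneExceptionJsonExample {
  public static void main(String[] args) throws Exception {
    OzoneException ex =
        new OzoneException(404, "NOT_FOUND", "volume does not exist");
    ex.setResource("vol1");           // placeholder resource name
    String wire = ex.toJsonString();  // JSON with httpCode, shortMessage, ...
    OzoneException parsed = OzoneException.parse(wire);
    System.out.println(parsed.getShortMessage()); // NOT_FOUND
  }
}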

View File

@@ -1,74 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest.headers;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* OZONE specific HTTP headers.
*/
@InterfaceAudience.Private
public final class Header {
public static final String OZONE_QUOTA_BYTES = "BYTES";
public static final String OZONE_QUOTA_MB = "MB";
public static final String OZONE_QUOTA_GB = "GB";
public static final String OZONE_QUOTA_TB = "TB";
public static final String OZONE_QUOTA_REMOVE = "remove";
public static final String OZONE_QUOTA_UNDEFINED = "undefined";
public static final String OZONE_EMPTY_STRING="";
public static final String OZONE_DEFAULT_LIST_SIZE = "1000";
public static final String OZONE_USER = "x-ozone-user";
public static final String OZONE_SIMPLE_AUTHENTICATION_SCHEME = "OZONE";
public static final String OZONE_VERSION_HEADER = "x-ozone-version";
public static final String OZONE_V1_VERSION_HEADER ="v1";
public static final String OZONE_LIST_QUERY_SERVICE = "service";
public static final String OZONE_INFO_QUERY_VOLUME = "volume";
public static final String OZONE_INFO_QUERY_BUCKET = "bucket";
public static final String OZONE_INFO_QUERY_KEY = "key";
public static final String OZONE_INFO_QUERY_KEY_DETAIL = "key-detail";
public static final String OZONE_REQUEST_ID = "x-ozone-request-id";
public static final String OZONE_SERVER_NAME = "x-ozone-server-name";
public static final String OZONE_STORAGE_TYPE = "x-ozone-storage-type";
public static final String OZONE_BUCKET_VERSIONING =
"x-ozone-bucket-versioning";
public static final String OZONE_ACLS = "x-ozone-acls";
public static final String OZONE_ACL_ADD = "ADD";
public static final String OZONE_ACL_REMOVE = "REMOVE";
public static final String OZONE_INFO_QUERY_TAG ="info";
public static final String OZONE_QUOTA_QUERY_TAG ="quota";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String OZONE_LIST_QUERY_PREFIX="prefix";
public static final String OZONE_LIST_QUERY_MAXKEYS="max-keys";
public static final String OZONE_LIST_QUERY_PREVKEY="prev-key";
public static final String OZONE_LIST_QUERY_ROOTSCAN="root-scan";
public static final String OZONE_RENAME_TO_KEY_PARAM_NAME = "toKey";
private Header() {
// Never constructed.
}
}

View File

@@ -1,22 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest.headers;
/**
* Ozone HTTP Header utility.
*/

View File

@@ -1,22 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest;
/**
* Ozone REST interface.
*/

View File

@@ -1,248 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest.response;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConsts;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.google.common.base.Preconditions;
/**
* BucketInfo class is used for parsing the JSON response
* when a BucketInfo call is made.
*/
public class BucketInfo implements Comparable<BucketInfo> {
private static final ObjectReader READER =
new ObjectMapper().readerFor(BucketInfo.class);
private String volumeName;
private String bucketName;
private String createdOn;
private List<OzoneAcl> acls;
private OzoneConsts.Versioning versioning;
private StorageType storageType;
private String bekName;
/**
* Constructor for BucketInfo.
*
* @param volumeName
* @param bucketName
*/
public BucketInfo(String volumeName, String bucketName) {
this.volumeName = volumeName;
this.bucketName = bucketName;
}
/**
* Default constructor for BucketInfo.
*/
public BucketInfo() {
acls = new LinkedList<>();
}
/**
* Parse a JSON string into BucketInfo Object.
*
* @param jsonString Json String
* @return BucketInfo
* @throws IOException
*/
public static BucketInfo parse(String jsonString) throws IOException {
return READER.readValue(jsonString);
}
/**
* Returns a List of ACLs set on the Bucket.
*
* @return List of Acl
*/
public List<OzoneAcl> getAcls() {
return acls;
}
/**
* Sets ACls.
*
* @param acls Acl list
*/
public void setAcls(List<OzoneAcl> acls) {
this.acls = acls;
}
/**
* Returns Storage Type info.
*
* @return Storage Type of the bucket
*/
public StorageType getStorageType() {
return storageType;
}
/**
* Sets the Storage Type.
*
* @param storageType Storage Type
*/
public void setStorageType(StorageType storageType) {
this.storageType = storageType;
}
/**
* Returns versioning.
*
* @return versioning Enum
*/
public OzoneConsts.Versioning getVersioning() {
return versioning;
}
/**
* Sets Versioning.
*
* @param versioning
*/
public void setVersioning(OzoneConsts.Versioning versioning) {
this.versioning = versioning;
}
/**
* Gets bucket Name.
*
* @return String
*/
public String getBucketName() {
return bucketName;
}
/**
* Sets bucket Name.
*
* @param bucketName Name of the bucket
*/
public void setBucketName(String bucketName) {
this.bucketName = bucketName;
}
/**
* Sets creation time of the bucket.
*
* @param creationTime Date String
*/
public void setCreatedOn(String creationTime) {
this.createdOn = creationTime;
}
/**
* Returns creation time.
*
* @return creation time of bucket.
*/
public String getCreatedOn() {
return createdOn;
}
/**
* Returns Volume Name.
*
* @return String volume name
*/
public String getVolumeName() {
return volumeName;
}
/**
* Sets the Volume Name of bucket.
*
* @param volumeName volumeName
*/
public void setVolumeName(String volumeName) {
this.volumeName = volumeName;
}
/**
* Return bucket encryption key name.
* @return bucket encryption key name
*/
public String getEncryptionKeyName() {
return bekName;
}
/**
* Sets the bucket encryption key name.
* @param name bucket encryption key name
*/
public void setEncryptionKeyName(String name) {
this.bekName = name;
}
/**
* Compares this object with the specified object for order. Returns a
* negative integer, zero, or a positive integer as this object is less
* than, equal to, or greater than the specified object.
*
* Please note: BucketInfo compare functions are used only within the
* context of a volume, hence the volume name is purposefully ignored in
* the compareTo, equals and hashCode functions of this class.
*/
@Override
public int compareTo(BucketInfo o) {
Preconditions.checkState(o.getVolumeName().equals(this.getVolumeName()));
return this.bucketName.compareTo(o.getBucketName());
}
/**
* Checks if two bucketInfo's are equal.
* @param o Object BucketInfo
* @return True or False
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof BucketInfo)) {
return false;
}
BucketInfo that = (BucketInfo) o;
Preconditions.checkState(that.getVolumeName().equals(this.getVolumeName()));
return bucketName.equals(that.bucketName);
}
/**
* Hash Code for this object.
* @return int
*/
@Override
public int hashCode() {
return bucketName.hashCode();
}
}
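Before removal, the class round-tripped the REST bucket payload through Jackson, binding fields via the public setters above. A parse sketch with an illustrative JSON sample:

// Illustrative only: BucketInfo is removed by this patch.
public class BucketInfoParseExample {
  public static void main(String[] args) throws Exception {
    String json = "{\"volumeName\":\"vol1\",\"bucketName\":\"b1\"}";
    BucketInfo info = BucketInfo.parse(json);
    System.out.println(info.getVolumeName() + "/" + info.getBucketName());
  }
}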

View File

@@ -1,236 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest.response;
import java.io.IOException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.hdds.client.ReplicationType;
/**
* KeyInfo class is used for parsing the JSON response
* when a KeyInfo call is made.
*/
public class KeyInfo implements Comparable<KeyInfo> {
private static final ObjectReader READER =
new ObjectMapper().readerFor(KeyInfo.class);
private long version;
private String md5hash;
private String createdOn;
private String modifiedOn;
private long size;
private String keyName;
private ReplicationType type;
/**
* Return replication type of the key.
*
* @return replication type
*/
public ReplicationType getType() {
return type;
}
/**
* Set replication type of the key.
*
* @param replicationType
*/
public void setType(ReplicationType replicationType) {
this.type = replicationType;
}
/**
* When this key was created.
*
* @return Date String
*/
public String getCreatedOn() {
return createdOn;
}
/**
* When this key was modified.
*
* @return Date String
*/
public String getModifiedOn() {
return modifiedOn;
}
/**
* When this key was created.
*
* @param createdOn Date String
*/
public void setCreatedOn(String createdOn) {
this.createdOn = createdOn;
}
/**
* When this key was modified.
*
* @param modifiedOn Date String
*/
public void setModifiedOn(String modifiedOn) {
this.modifiedOn = modifiedOn;
}
/**
* Gets the Key name of this object.
*
* @return String
*/
public String getKeyName() {
return keyName;
}
/**
* Sets the Key name of this object.
*
* @param keyName String
*/
public void setKeyName(String keyName) {
this.keyName = keyName;
}
/**
* Returns the MD5 Hash for the data of this key.
*
* @return String MD5
*/
public String getMd5hash() {
return md5hash;
}
/**
* Sets the MD5 value of this key.
*
* @param md5hash Md5 of this file
*/
public void setMd5hash(String md5hash) {
this.md5hash = md5hash;
}
/**
* Number of bytes stored in the data part of this key.
*
* @return long size of the data file
*/
public long getSize() {
return size;
}
/**
* Sets the size of the data part of this key.
*
* @param size Size in long
*/
public void setSize(long size) {
this.size = size;
}
/**
* Version of this key.
*
* @return returns the version of this key.
*/
public long getVersion() {
return version;
}
/**
* Sets the version of this key.
*
* @param version - Version String
*/
public void setVersion(long version) {
this.version = version;
}
/**
* Compares this object with the specified object for order. Returns a
* negative integer, zero, or a positive integer as this object is less
* than, equal to, or greater than the specified object.
*
* @param o the object to be compared.
* @return a negative integer, zero, or a positive integer as this object
* is less than, equal to, or greater than the specified object.
* @throws NullPointerException if the specified object is null
* @throws ClassCastException if the specified object's type prevents it
* from being compared to this object.
*/
@Override
public int compareTo(KeyInfo o) {
if (this.keyName.compareTo(o.getKeyName()) != 0) {
return this.keyName.compareTo(o.getKeyName());
}
if (this.getVersion() == o.getVersion()) {
return 0;
}
if (this.getVersion() < o.getVersion()) {
return -1;
}
return 1;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
KeyInfo keyInfo = (KeyInfo) o;
return new EqualsBuilder()
.append(version, keyInfo.version)
.append(keyName, keyInfo.keyName)
.isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder(17, 37)
.append(version)
.append(keyName)
.toHashCode();
}
/**
* Parse a string to return KeyInfo Object.
*
* @param jsonString Json String
* @return keyInfo
* @throws IOException
*/
public static KeyInfo parse(String jsonString) throws IOException {
return READER.readValue(jsonString);
}
}
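The ordering contract above sorts by key name first, then by ascending version; a small worked sketch of what listings relied on:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Illustrative only: KeyInfo is removed by this patch.
public class KeyInfoOrderingExample {
  public static void main(String[] args) {
    KeyInfo v2 = new KeyInfo();
    v2.setKeyName("k1");
    v2.setVersion(2);
    KeyInfo v1 = new KeyInfo();
    v1.setKeyName("k1");
    v1.setVersion(1);
    List<KeyInfo> keys = new ArrayList<>();
    keys.add(v2);
    keys.add(v1);
    Collections.sort(keys); // result: k1 version 1 before k1 version 2
  }
}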

View File

@@ -1,118 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest.response;
import java.io.IOException;
import java.util.List;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.fs.FileEncryptionInfo;
/**
* KeyInfoDetails class is used for parsing the JSON response
* when a KeyInfoDetails call is made.
*/
public class KeyInfoDetails extends KeyInfo {
private static final ObjectReader READER =
new ObjectMapper().readerFor(KeyInfoDetails.class);
/**
* A list of KeyLocation objects, each pairing a containerID with a
* localID to specify replica locations.
*/
private List<KeyLocation> keyLocations;
private FileEncryptionInfo feInfo;
/**
* Constructor needed for json serialization.
*/
public KeyInfoDetails() {
}
/**
* Set details of key location.
*
* @param locations - details of key location
*/
public void setKeyLocation(List<KeyLocation> locations) {
this.keyLocations = locations;
}
/**
* Returns details of key location.
*
* @return list of key locations
*/
public List<KeyLocation> getKeyLocations() {
return keyLocations;
}
public void setFileEncryptionInfo(FileEncryptionInfo info) {
this.feInfo = info;
}
public FileEncryptionInfo getFileEncryptionInfo() {
return feInfo;
}
/**
* Parse a string to return KeyInfoDetails Object.
*
* @param jsonString Json String
* @return KeyInfoDetails
* @throws IOException
*/
public static KeyInfoDetails parse(String jsonString) throws IOException {
return READER.readValue(jsonString);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
KeyInfoDetails that = (KeyInfoDetails) o;
return new EqualsBuilder()
.append(getVersion(), that.getVersion())
.append(getKeyName(), that.getKeyName())
.append(keyLocations, that.keyLocations)
.isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder(21, 33)
.append(getVersion())
.append(getKeyName())
.append(keyLocations)
.toHashCode();
}
}

View File

@@ -1,89 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest.response;
/**
* KeyLocation class is used for parsing the JSON response
* when a KeyInfoDetails call is made.
*/
public class KeyLocation {
/**
* Which container this key is stored in.
*/
private long containerID;
/**
* Which block inside the container this key is stored in.
*/
private long localID;
/**
* Data length of this key replica.
*/
private long length;
/**
* Offset of this key.
*/
private long offset;
/**
* Empty constructor for Json serialization.
*/
public KeyLocation() {
}
/**
* Constructs KeyLocation.
*/
public KeyLocation(long containerID, long localID,
long length, long offset) {
this.containerID = containerID;
this.localID = localID;
this.length = length;
this.offset = offset;
}
/**
* Returns the containerID of this Key.
*/
public long getContainerID() {
return containerID;
}
/**
* Returns the localID of this Key.
*/
public long getLocalID() {
return localID;
}
/**
* Returns the length of this Key.
*/
public long getLength() {
return length;
}
/**
* Returns the offset of this Key.
*/
public long getOffset() {
return offset;
}
}

View File

@@ -1,215 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest.response;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdds.client.OzoneQuota;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
/**
* VolumeInfo class is used for parsing the JSON response
* when a VolumeInfo call is made.
*/
@InterfaceAudience.Private
public class VolumeInfo implements Comparable<VolumeInfo> {
private static final ObjectReader READER =
new ObjectMapper().readerFor(VolumeInfo.class);
private VolumeOwner owner;
private OzoneQuota quota;
private String volumeName;
private String createdOn;
private String createdBy;
/**
* Constructor for VolumeInfo.
*
* @param volumeName - Name of the Volume
* @param createdOn - Date String
* @param createdBy - Person who created it
*/
public VolumeInfo(String volumeName, String createdOn,
String createdBy) {
this.volumeName = volumeName;
this.createdOn = createdOn;
this.createdBy = createdBy;
}
/**
* Constructor for VolumeInfo.
*/
public VolumeInfo() {
}
/**
* Gets the volume name.
*
* @return Volume Name
*/
public String getVolumeName() {
return volumeName;
}
/**
* Sets the volume name.
*
* @param volumeName Volume Name
*/
public void setVolumeName(String volumeName) {
this.volumeName = volumeName;
}
/**
* Returns the name of the person who created this volume.
*
* @return Name of Admin who created this
*/
public String getCreatedBy() {
return createdBy;
}
/**
* Sets the user name of the person who created this volume.
*
* @param createdBy UserName
*/
public void setCreatedBy(String createdBy) {
this.createdBy = createdBy;
}
/**
* Gets the date on which this volume was created.
*
* @return Date String
*/
public String getCreatedOn() {
return createdOn;
}
/**
* Sets the date string.
*
* @param createdOn Date String
*/
public void setCreatedOn(String createdOn) {
this.createdOn = createdOn;
}
/**
* Returns the owner info.
*
* @return OwnerInfo
*/
public VolumeOwner getOwner() {
return owner;
}
/**
* Sets the owner.
*
* @param owner OwnerInfo
*/
public void setOwner(VolumeOwner owner) {
this.owner = owner;
}
/**
* Returns the quota information on a volume.
*
* @return Quota
*/
public OzoneQuota getQuota() {
return quota;
}
/**
* Sets the quota info.
*
* @param quota Quota Info
*/
public void setQuota(OzoneQuota quota) {
this.quota = quota;
}
/**
* Comparable Interface.
* @param o VolumeInfo Object.
* @return Result of comparison
*/
@Override
public int compareTo(VolumeInfo o) {
return this.volumeName.compareTo(o.getVolumeName());
}
/**
* Returns VolumeInfo class from json string.
*
* @param data Json String
*
* @return VolumeInfo
*
* @throws IOException
*/
public static VolumeInfo parse(String data) throws IOException {
return READER.readValue(data);
}
/**
* Indicates whether some other object is "equal to" this one.
*
* @param obj the reference object with which to compare.
*
* @return {@code true} if this object is the same as the obj
* argument; {@code false} otherwise.
*/
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
VolumeInfo otherInfo = (VolumeInfo) obj;
return otherInfo.getVolumeName().equals(this.getVolumeName());
}
/**
* Returns a hash code value for the object. This method is
* supported for the benefit of hash tables such as those provided by
* HashMap.
* @return a hash code value for this object.
*
* @see Object#equals(Object)
* @see System#identityHashCode
*/
@Override
public int hashCode() {
return getVolumeName().hashCode();
}
}

View File

@@ -1,61 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest.response;
import org.apache.hadoop.classification.InterfaceAudience;
import com.fasterxml.jackson.annotation.JsonInclude;
/**
* Volume Owner represents the owner of a volume.
*
* This is a class instead of a string since we might need to extend this class
* to support other forms of authentication.
*/
@InterfaceAudience.Private
public class VolumeOwner {
@JsonInclude(JsonInclude.Include.NON_NULL)
private String name;
/**
* Constructor for VolumeOwner.
*
* @param name name of the User
*/
public VolumeOwner(String name) {
this.name = name;
}
/**
* Constructs Volume Owner.
*/
public VolumeOwner() {
name = null;
}
/**
* Returns the user name.
*
* @return Name
*/
public String getName() {
return name;
}
}

View File

@@ -1,24 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.hadoop.ozone.client.rest.response;
/**
* This package contains classes for the ozone rest client library.
*/

View File

@@ -24,13 +24,11 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.ServicePort;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -165,27 +163,6 @@ public final class ServiceInfo {
serviceInfo.getServicePortsList());
}
/**
* Returns a JSON string of this object.
*
* @return String - json string
* @throws IOException
*/
public String toJsonString() throws IOException {
return WRITER.writeValueAsString(this);
}
/**
* Parse a JSON string into ServiceInfo Object.
*
* @param jsonString Json String
* @return BucketInfo
* @throws IOException
*/
public static BucketInfo parse(String jsonString) throws IOException {
return READER.readValue(jsonString);
}
/**
* Creates a new builder to build {@link ServiceInfo}.
* @return {@link ServiceInfo.Builder}

View File

@@ -1,134 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.handlers;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.OzoneConsts;
/**
* BucketArgs packages all bucket related arguments to
* file system calls.
*/
public class BucketArgs extends VolumeArgs {
private final String bucketName;
private OzoneConsts.Versioning versioning;
private StorageType storageType;
/**
* Constructor for BucketArgs.
*
* @param volumeName - volumeName
* @param bucketName - bucket Name
* @param userArgs - userArgs
*/
public BucketArgs(String volumeName, String bucketName, UserArgs userArgs) {
super(volumeName, userArgs);
this.bucketName = bucketName;
this.versioning = OzoneConsts.Versioning.NOT_DEFINED;
this.storageType = null;
}
/**
* Constructor for BucketArgs.
*
* @param bucketName - bucket Name
* @param volumeArgs - volume Args
*/
public BucketArgs(String bucketName, VolumeArgs volumeArgs) {
super(volumeArgs);
this.bucketName = bucketName;
this.versioning = OzoneConsts.Versioning.NOT_DEFINED;
this.storageType = null;
}
/**
* Constructor for BucketArgs.
*
* @param args - Bucket Args
*/
public BucketArgs(BucketArgs args) {
this(args.getBucketName(), args);
}
/**
* Returns the Bucket Name.
*
* @return Bucket Name
*/
public String getBucketName() {
return bucketName;
}
/**
* Returns Versioning Info.
*
* @return versioning
*/
public OzoneConsts.Versioning getVersioning() {
return versioning;
}
/**
* SetVersioning Info.
*
* @param versioning - Enum value
*/
public void setVersioning(OzoneConsts.Versioning versioning) {
this.versioning = versioning;
}
/**
* Returns the current Storage Class.
*
* @return Storage Class
*/
public StorageType getStorageType() {
return storageType;
}
/**
* Sets the Storage Class.
*
* @param storageType Set Storage Class
*/
public void setStorageType(StorageType storageType) {
this.storageType = storageType;
}
/**
* Returns Volume/bucketName.
*
* @return String
*/
@Override
public String getResourceName() {
return getVolumeName() + "/" + getBucketName();
}
/**
* Returns User/Volume name which is the parent of this
* bucket.
*
* @return String
*/
public String getParentName() {
return getUserName() + "/" + getVolumeName();
}
}

View File

@@ -1,117 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.handlers;
/**
* Class that packages all key Arguments.
*/
public class KeyArgs extends BucketArgs {
private String key;
private String hash;
private long size;
/**
* Constructor for Key Args.
*
* @param volumeName - Volume Name
* @param bucketName - Bucket Name
* @param objectName - Key
*/
public KeyArgs(String volumeName, String bucketName,
String objectName, UserArgs args) {
super(volumeName, bucketName, args);
this.key = objectName;
}
/**
* Constructor for Key Args.
*
* @param objectName - Key
* @param args - Bucket Args
*/
public KeyArgs(String objectName, BucketArgs args) {
super(args);
this.key = objectName;
}
/**
* Get Key Name.
*
* @return String
*/
public String getKeyName() {
return this.key;
}
/**
 * Returns the computed file hash.
*
* @return String
*/
public String getHash() {
return hash;
}
/**
* Sets the hash String.
*
* @param hash String
*/
public void setHash(String hash) {
this.hash = hash;
}
/**
* Returns the file size.
*
* @return long - file size
*/
public long getSize() {
return size;
}
/**
* Set Size.
*
* @param size Size of the file
*/
public void setSize(long size) {
this.size = size;
}
/**
* Returns the name of the resource.
*
* @return String
*/
@Override
public String getResourceName() {
return super.getResourceName() + "/" + getKeyName();
}
/**
* Parent name of this resource.
*
* @return String.
*/
@Override
public String getParentName() {
return super.getResourceName();
}
}
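
A short illustrative sketch of how resource names compose through the
args hierarchy (names are hypothetical; nulls again stand in for the
JAX-RS context objects).

import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.KeyArgs;
import org.apache.hadoop.ozone.web.handlers.UserArgs;

public class KeyArgsSketch {
  public static void main(String[] args) {
    UserArgs userArgs =
        new UserArgs("hadoop", "req-0002", "localhost", null, null, null);
    BucketArgs bucketArgs = new BucketArgs("vol1", "bucket1", userArgs);
    KeyArgs keyArgs = new KeyArgs("key1", bucketArgs);
    keyArgs.setSize(4096);
    System.out.println(keyArgs.getResourceName()); // vol1/bucket1/key1
    System.out.println(keyArgs.getParentName());   // vol1/bucket1
  }
}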

View File

@ -1,142 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.handlers;
/**
* Supports listing keys with pagination.
*/
public class ListArgs<T extends UserArgs> {
private String prevKey;
private String prefix;
private int maxKeys;
private boolean rootScan;
private T args;
/**
* Constructor for ListArgs.
*
 * @param args - args of type T (typically BucketArgs)
* @param prefix Prefix to start Query from
* @param maxKeys Max result set
* @param prevKey - Page token
*/
public ListArgs(T args, String prefix, int maxKeys,
String prevKey) {
setArgs(args);
setPrefix(prefix);
setMaxKeys(maxKeys);
setPrevKey(prevKey);
}
/**
* Copy Constructor for ListArgs.
*
* @param args - List Args
*/
public ListArgs(T args, ListArgs listArgs) {
this(args, listArgs.getPrefix(), listArgs.getMaxKeys(),
listArgs.getPrevKey());
}
/**
* Returns page token.
*
* @return String
*/
public String getPrevKey() {
return prevKey;
}
/**
* Sets page token.
*
* @param prevKey - Page token
*/
public void setPrevKey(String prevKey) {
this.prevKey = prevKey;
}
/**
* Gets max keys.
*
* @return int
*/
public int getMaxKeys() {
return maxKeys;
}
/**
* Sets max keys.
*
* @param maxKeys - Maximum keys to return
*/
public void setMaxKeys(int maxKeys) {
this.maxKeys = maxKeys;
}
/**
* Gets prefix.
*
* @return String
*/
public String getPrefix() {
return prefix;
}
/**
* Sets prefix.
*
* @param prefix - The prefix that we are looking for
*/
public void setPrefix(String prefix) {
this.prefix = prefix;
}
/**
* Gets args.
* @return T
*/
public T getArgs() {
return args;
}
/**
* Sets args.
* @param args T
*/
public void setArgs(T args) {
this.args = args;
}
/**
* Checks if we are doing a rootScan.
* @return - RootScan.
*/
public boolean isRootScan() {
return rootScan;
}
/**
* Sets the RootScan property.
* @param rootScan - Boolean.
*/
public void setRootScan(boolean rootScan) {
this.rootScan = rootScan;
}
}
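
A minimal pagination sketch under the same assumptions (hypothetical
names, null JAX-RS context objects): the first request passes a null
prevKey, and each follow-up page passes the last key already returned.

import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.ListArgs;
import org.apache.hadoop.ozone.web.handlers.UserArgs;

public class ListArgsSketch {
  public static void main(String[] args) {
    UserArgs userArgs =
        new UserArgs("hadoop", "req-0003", "localhost", null, null, null);
    BucketArgs bucketArgs = new BucketArgs("vol1", "bucket1", userArgs);
    // First page: up to 100 keys under the "logs/" prefix, no page token.
    ListArgs<BucketArgs> listArgs =
        new ListArgs<>(bucketArgs, "logs/", 100, null);
    listArgs.setRootScan(false);
    // A follow-up page would pass the last key of the previous page.
    listArgs.setPrevKey("logs/0099");
  }
}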

View File

@ -1,172 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.handlers;
import org.apache.hadoop.classification.InterfaceAudience;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Request;
import javax.ws.rs.core.UriInfo;
import java.util.Arrays;
/**
* UserArgs is used to package caller info
 * and pass it down to the file system.
*/
@InterfaceAudience.Private
public class UserArgs {
private String userName;
private final String requestID;
private final String hostName;
private final UriInfo uri;
private final Request request;
private final HttpHeaders headers;
private String[] groups;
/**
* Constructs user args.
*
* @param userName - User name
* @param requestID - Request ID
* @param hostName - Host Name
* @param req - Request
* @param info - Uri Info
* @param httpHeaders - http headers
*/
public UserArgs(String userName, String requestID, String hostName,
Request req, UriInfo info, HttpHeaders httpHeaders) {
this.hostName = hostName;
this.userName = userName;
this.requestID = requestID;
this.uri = info;
this.request = req;
this.headers = httpHeaders;
}
/**
* Constructs user args when we don't know the user name yet.
*
 * @param requestID - Request ID
* @param hostName - Host Name
* @param req - Request
* @param info - UriInfo
* @param httpHeaders - http headers
*/
public UserArgs(String requestID, String hostName, Request req, UriInfo info,
HttpHeaders httpHeaders) {
this.hostName = hostName;
this.requestID = requestID;
this.uri = info;
this.request = req;
this.headers = httpHeaders;
}
/**
* Returns hostname.
*
* @return String
*/
public String getHostName() {
return hostName;
}
/**
* Returns RequestID.
*
 * @return String
*/
public String getRequestID() {
return requestID;
}
/**
* Returns User Name.
*
* @return String
*/
public String getUserName() {
return userName;
}
/**
* Sets the user name.
*
* @param userName Name of the user
*/
public void setUserName(String userName) {
this.userName = userName;
}
/**
* Returns list of groups.
*
* @return String[]
*/
public String[] getGroups() {
return groups != null ?
Arrays.copyOf(groups, groups.length) : null;
}
/**
* Sets the group list.
*
* @param groups list of groups
*/
public void setGroups(String[] groups) {
if (groups != null) {
this.groups = Arrays.copyOf(groups, groups.length);
}
}
/**
* Returns the resource Name.
*
* @return String Resource.
*/
public String getResourceName() {
return getUserName();
}
/**
* Returns Http Headers for this call.
*
* @return httpHeaders
*/
public HttpHeaders getHeaders() {
return headers;
}
/**
* Returns Request Object.
*
* @return Request
*/
public Request getRequest() {
return request;
}
/**
* Returns UriInfo.
*
* @return UriInfo
*/
public UriInfo getUri() {
return uri;
}
}

View File

@ -1,143 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.handlers;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ozone.web.request.OzoneQuota;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Request;
import javax.ws.rs.core.UriInfo;
/**
 * VolumeArgs is used to package all volume-related
 * arguments in the call to the underlying
 * file system.
*/
@InterfaceAudience.Private
public class VolumeArgs extends UserArgs {
private String adminName;
private final String volumeName;
private OzoneQuota quota;
/**
* Returns Quota Information.
*
* @return Quota
*/
public OzoneQuota getQuota() {
return quota;
}
/**
* Returns volume name.
*
* @return String
*/
public String getVolumeName() {
return volumeName;
}
/**
* Constructs volume Args.
*
* @param userName - User name
* @param volumeName - volume Name
 * @param requestID - Request ID
* @param hostName - Host Name
* @param request - Http Request
* @param info - URI info
* @param headers - http headers
* @param groups - list of groups allowed to access the volume
*/
@SuppressWarnings("parameternumber")
public VolumeArgs(String userName, String volumeName, String requestID,
String hostName, Request request, UriInfo info,
HttpHeaders headers, String[] groups) {
super(userName, requestID, hostName, request, info, headers);
super.setGroups(groups);
this.volumeName = volumeName;
}
/**
* Constructs volume Args.
*
* @param volumeName - volume Name
* @param userArgs - userArgs
*/
public VolumeArgs(String volumeName, UserArgs userArgs) {
this(userArgs.getUserName(), volumeName, userArgs.getRequestID(),
userArgs.getHostName(), userArgs.getRequest(), userArgs.getUri(),
userArgs.getHeaders(), userArgs.getGroups());
}
/**
* Creates VolumeArgs from another VolumeArgs.
*/
public VolumeArgs(VolumeArgs volArgs) {
this(volArgs.getVolumeName(), volArgs);
}
/**
* Sets Quota information.
*
 * @param quota - Quota String
* @throws IllegalArgumentException
*/
public void setQuota(String quota) throws IllegalArgumentException {
this.quota = OzoneQuota.parseQuota(quota);
}
/**
* Sets quota information.
*
* @param quota - OzoneQuota
*/
public void setQuota(OzoneQuota quota) {
this.quota = quota;
}
/**
* Gets admin Name.
*
* @return - Admin Name
*/
public String getAdminName() {
return adminName;
}
/**
* Sets Admin Name.
*
* @param adminName - Admin Name
*/
public void setAdminName(String adminName) {
this.adminName = adminName;
}
/**
* Returns UserName/VolumeName.
*
* @return String
*/
@Override
public String getResourceName() {
return super.getResourceName() + "/" + getVolumeName();
}
}

View File

@ -1,22 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.handlers;
/**
* REST handler value classes.
*/

View File

@ -1,214 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.request;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import com.fasterxml.jackson.annotation.JsonIgnore;
/**
 * Represents an OzoneQuota object that can be applied to
* a storage volume.
*/
@InterfaceAudience.Private
public class OzoneQuota {
private Units unit;
private int size;
/** Quota Units.*/
public enum Units {UNDEFINED, BYTES, MB, GB, TB}
/**
* Returns size.
*
* @return int
*/
public int getSize() {
return size;
}
/**
* Returns Units.
*
* @return Unit in MB, GB or TB
*/
public Units getUnit() {
return unit;
}
/**
* Constructs a default Quota object.
*/
public OzoneQuota() {
this.size = 0;
this.unit = Units.UNDEFINED;
}
/**
* Constructor for Ozone Quota.
*
* @param size - Integer Size
* @param unit MB, GB or TB
*/
public OzoneQuota(int size, Units unit) {
this.size = size;
this.unit = unit;
}
/**
* Formats a quota as a string.
*
* @param quota the quota to format
* @return string representation of quota
*/
public static String formatQuota(OzoneQuota quota) {
return String.valueOf(quota.size) + quota.unit;
}
/**
* Parses a user provided string and returns the
* Quota Object.
*
* @param quotaString Quota String
*
* @return OzoneQuota object
*
* @throws IllegalArgumentException
*/
public static OzoneQuota parseQuota(String quotaString)
throws IllegalArgumentException {
if ((quotaString == null) || (quotaString.isEmpty())) {
throw new IllegalArgumentException(
"Quota string cannot be null or empty.");
}
if (isRemove(quotaString)) {
throw new IllegalArgumentException("Remove is invalid in this context.");
}
String uppercase = quotaString.toUpperCase().replaceAll("\\s+", "");
String size = "";
int nSize;
Units currUnit = Units.MB;
    boolean found = false;
if (uppercase.endsWith(Header.OZONE_QUOTA_MB)) {
size = uppercase
.substring(0, uppercase.length() - Header.OZONE_QUOTA_MB.length());
currUnit = Units.MB;
found = true;
}
if (uppercase.endsWith(Header.OZONE_QUOTA_GB)) {
size = uppercase
.substring(0, uppercase.length() - Header.OZONE_QUOTA_GB.length());
currUnit = Units.GB;
found = true;
}
if (uppercase.endsWith(Header.OZONE_QUOTA_TB)) {
size = uppercase
.substring(0, uppercase.length() - Header.OZONE_QUOTA_TB.length());
currUnit = Units.TB;
found = true;
}
if (uppercase.endsWith(Header.OZONE_QUOTA_BYTES)) {
size = uppercase
.substring(0, uppercase.length() - Header.OZONE_QUOTA_BYTES.length());
currUnit = Units.BYTES;
found = true;
}
if (!found) {
throw new IllegalArgumentException(
"Quota unit not recognized. Supported values are BYTES, MB, GB and " +
"TB.");
}
nSize = Integer.parseInt(size);
if (nSize < 0) {
throw new IllegalArgumentException("Quota cannot be negative.");
}
return new OzoneQuota(nSize, currUnit);
}
/**
 * Checks if the quota string is just a remove operation.
*
* @param quotaString User provided quota String
*
* @return True if it is Remove, false otherwise
*/
public static boolean isRemove(String quotaString) {
return (quotaString != null) &&
(quotaString.compareToIgnoreCase(Header.OZONE_QUOTA_REMOVE) == 0);
}
/**
* Returns size in Bytes or -1 if there is no Quota.
*/
@JsonIgnore
public long sizeInBytes() {
switch (this.unit) {
case BYTES:
return this.getSize();
case MB:
return this.getSize() * OzoneConsts.MB;
case GB:
return this.getSize() * OzoneConsts.GB;
case TB:
return this.getSize() * OzoneConsts.TB;
case UNDEFINED:
default:
return -1;
}
}
/**
* Returns OzoneQuota corresponding to size in bytes.
*
* @param sizeInBytes size in bytes to be converted
*
* @return OzoneQuota object
*/
public static OzoneQuota getOzoneQuota(long sizeInBytes) {
long size;
Units unit;
if (sizeInBytes % OzoneConsts.TB == 0) {
size = sizeInBytes / OzoneConsts.TB;
unit = Units.TB;
} else if (sizeInBytes % OzoneConsts.GB == 0) {
size = sizeInBytes / OzoneConsts.GB;
unit = Units.GB;
} else if (sizeInBytes % OzoneConsts.MB == 0) {
size = sizeInBytes / OzoneConsts.MB;
unit = Units.MB;
} else {
size = sizeInBytes;
unit = Units.BYTES;
}
return new OzoneQuota((int)size, unit);
}
}
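
A minimal sketch of the parse/format/convert round trip implemented
above (the quota value is illustrative).

import org.apache.hadoop.ozone.web.request.OzoneQuota;

public class QuotaSketch {
  public static void main(String[] args) {
    // Case and whitespace are normalized before the unit suffix is matched.
    OzoneQuota quota = OzoneQuota.parseQuota("10 GB");
    System.out.println(quota.getSize());               // 10
    System.out.println(quota.sizeInBytes());           // 10 * OzoneConsts.GB
    System.out.println(OzoneQuota.formatQuota(quota)); // 10GB
    // Converting raw bytes back picks the largest unit that divides evenly.
    OzoneQuota roundTrip = OzoneQuota.getOzoneQuota(quota.sizeInBytes());
    System.out.println(roundTrip.getUnit());           // GB
  }
}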

View File

@ -1,23 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Request classes for the Ozone REST interface.
*/
package org.apache.hadoop.ozone.web.request;

View File

@ -1,335 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.response;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.web.utils.JsonUtils;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonFilter;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.databind.ser.FilterProvider;
import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
import com.google.common.base.Preconditions;
/**
 * BucketInfo class; this is used as the response class to send
 * JSON info about a bucket back to a client.
*/
public class BucketInfo implements Comparable<BucketInfo> {
static final String BUCKET_INFO = "BUCKET_INFO_FILTER";
private static final ObjectReader READER =
new ObjectMapper().readerFor(BucketInfo.class);
private static final ObjectWriter WRITER;
static {
ObjectMapper mapper = new ObjectMapper();
String[] ignorableFieldNames = {"bytesUsed", "keyCount"};
FilterProvider filters = new SimpleFilterProvider().addFilter(BUCKET_INFO,
SimpleBeanPropertyFilter.serializeAllExcept(ignorableFieldNames));
mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY);
mapper.addMixIn(Object.class, MixIn.class);
mapper.setFilterProvider(filters);
WRITER = mapper.writerWithDefaultPrettyPrinter();
}
private String volumeName;
private String bucketName;
private String createdOn;
private List<OzoneAcl> acls;
private OzoneConsts.Versioning versioning;
private StorageType storageType;
private long bytesUsed;
private long keyCount;
private String encryptionKeyName;
/**
* Constructor for BucketInfo.
*
* @param volumeName
* @param bucketName
*/
public BucketInfo(String volumeName, String bucketName) {
this.volumeName = volumeName;
this.bucketName = bucketName;
}
/**
* Default constructor for BucketInfo.
*/
public BucketInfo() {
acls = new ArrayList<>();
}
/**
* Parse a JSON string into BucketInfo Object.
*
* @param jsonString - Json String
*
* @return - BucketInfo
*
* @throws IOException
*/
public static BucketInfo parse(String jsonString) throws IOException {
return READER.readValue(jsonString);
}
/**
 * Returns a list of ACLs on the bucket.
*
* @return List of Acls
*/
public List<OzoneAcl> getAcls() {
return acls;
}
/**
 * Sets ACLs.
*
* @param acls - Acls list
*/
public void setAcls(List<OzoneAcl> acls) {
this.acls = acls;
}
/**
* Returns Storage Type info.
*
* @return Storage Type of the bucket
*/
public StorageType getStorageType() {
return storageType;
}
/**
* Sets the Storage Type.
*
* @param storageType - Storage Type
*/
public void setStorageType(StorageType storageType) {
this.storageType = storageType;
}
/**
* Returns versioning.
*
* @return versioning Enum
*/
public OzoneConsts.Versioning getVersioning() {
return versioning;
}
/**
* Sets Versioning.
*
* @param versioning
*/
public void setVersioning(OzoneConsts.Versioning versioning) {
this.versioning = versioning;
}
/**
* Gets bucket Name.
*
* @return String
*/
public String getBucketName() {
return bucketName;
}
/**
* Sets bucket Name.
*
* @param bucketName - Name of the bucket
*/
public void setBucketName(String bucketName) {
this.bucketName = bucketName;
}
/**
* Sets creation time of the bucket.
*
* @param creationTime - Date String
*/
public void setCreatedOn(String creationTime) {
this.createdOn = creationTime;
}
/**
* Returns creation time.
*
* @return creation time of bucket.
*/
public String getCreatedOn() {
return createdOn;
}
public void setEncryptionKeyName(String encryptionKeyName) {
this.encryptionKeyName = encryptionKeyName;
}
public String getEncryptionKeyName() {
return encryptionKeyName;
}
/**
 * Returns a JSON string of this object, after stripping out
 * bytesUsed and keyCount.
*
* @return String
*/
public String toJsonString() throws IOException {
return WRITER.writeValueAsString(this);
}
/**
* Returns the Object as a Json String.
*
 * The reason both toJsonString and toDBString exist
 * is that toJsonString supports an external-facing contract with
 * REST clients, while the server internally wants to add more
 * fields to this class. The distinction helps in serializing all
 * fields vs. only the fields that are part of the REST protocol.
*/
public String toDBString() throws IOException {
return JsonUtils.toJsonString(this);
}
/**
* Returns Volume Name.
*
* @return String volume name
*/
public String getVolumeName() {
return volumeName;
}
/**
* Sets the Volume Name of the bucket.
*
* @param volumeName - volumeName
*/
public void setVolumeName(String volumeName) {
this.volumeName = volumeName;
}
/**
* Compares this object with the specified object for order. Returns a
* negative integer, zero, or a positive integer as this object is less
* than, equal to, or greater than the specified object.
*
 * Please note: BucketInfo compare functions are used only within the
 * context of a volume, hence volume name is purposefully ignored in
 * the compareTo, equals and hashCode functions of this class.
*/
@Override
public int compareTo(BucketInfo o) {
Preconditions.checkState(o.getVolumeName().equals(this.getVolumeName()));
return this.bucketName.compareTo(o.getBucketName());
}
/**
* Checks if two bucketInfo's are equal.
* @param o Object BucketInfo
* @return True or False
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof BucketInfo)) {
return false;
}
BucketInfo that = (BucketInfo) o;
Preconditions.checkState(that.getVolumeName().equals(this.getVolumeName()));
return bucketName.equals(that.bucketName);
}
/**
* Hash Code for this object.
* @return int
*/
@Override
public int hashCode() {
return bucketName.hashCode();
}
/**
* Get the number of bytes used by this bucket.
*
* @return long
*/
public long getBytesUsed() {
return bytesUsed;
}
/**
* Set bytes Used.
*
* @param bytesUsed - bytesUsed
*/
public void setBytesUsed(long bytesUsed) {
this.bytesUsed = bytesUsed;
}
/**
* Get Key Count inside this bucket.
*
* @return - KeyCount
*/
public long getKeyCount() {
return keyCount;
}
/**
* Set Key Count inside this bucket.
*
* @param keyCount - Sets the Key Count
*/
public void setKeyCount(long keyCount) {
this.keyCount = keyCount;
}
/**
* This class allows us to create custom filters
* for the Json serialization.
*/
@JsonFilter(BUCKET_INFO)
static class MixIn {
}
}
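
The two serialized forms can be seen in a short sketch, mirroring what
TestBucketInfo further below exercises (names and counts are illustrative).

import org.apache.hadoop.ozone.web.response.BucketInfo;

public class BucketInfoSketch {
  public static void main(String[] args) throws Exception {
    BucketInfo info = new BucketInfo("vol1", "bucket1");
    info.setBytesUsed(1024);
    info.setKeyCount(3);
    // REST-facing JSON: the BUCKET_INFO filter drops bytesUsed and keyCount.
    String restJson = info.toJsonString();
    // Internal form: every field is serialized.
    String dbJson = info.toDBString();
    // Round-tripping the DB string preserves equality (by bucket name).
    BucketInfo parsed = BucketInfo.parse(dbJson);
    System.out.println(info.equals(parsed)); // true
  }
}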

View File

@ -1,311 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.response;
import java.io.IOException;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.web.utils.JsonUtils;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonFilter;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.databind.ser.FilterProvider;
import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
/**
* Represents an Ozone key Object.
*/
public class KeyInfo implements Comparable<KeyInfo> {
static final String OBJECT_INFO = "OBJECT_INFO_FILTER";
private static final ObjectReader READER =
new ObjectMapper().readerFor(KeyInfo.class);
private static final ObjectWriter WRITER;
static {
ObjectMapper mapper = new ObjectMapper();
String[] ignorableFieldNames = {"dataFileName"};
FilterProvider filters = new SimpleFilterProvider()
.addFilter(OBJECT_INFO, SimpleBeanPropertyFilter
.serializeAllExcept(ignorableFieldNames));
mapper.setVisibility(PropertyAccessor.FIELD,
JsonAutoDetect.Visibility.ANY);
mapper.addMixIn(Object.class, MixIn.class);
mapper.setFilterProvider(filters);
WRITER = mapper.writerWithDefaultPrettyPrinter();
}
/**
* This class allows us to create custom filters
* for the Json serialization.
*/
@JsonFilter(OBJECT_INFO)
class MixIn {
}
private long version;
private String md5hash;
private String createdOn;
private String modifiedOn;
private long size;
private String keyName;
private String dataFileName;
private ReplicationType type;
/**
* Return replication type of the key.
*
* @return replication type
*/
public ReplicationType getType() {
return type;
}
/**
* Set replication type of the key.
*
* @param replicationType
*/
public void setType(ReplicationType replicationType) {
this.type = replicationType;
}
/**
* When this key was created.
*
* @return Date String
*/
public String getCreatedOn() {
return createdOn;
}
/**
* When this key was modified.
*
* @return Date String
*/
public String getModifiedOn() {
return modifiedOn;
}
/**
* When this key was created.
*
* @param createdOn - Date String
*/
public void setCreatedOn(String createdOn) {
this.createdOn = createdOn;
}
/**
* When this key was modified.
*
* @param modifiedOn - Date String
*/
public void setModifiedOn(String modifiedOn) {
this.modifiedOn = modifiedOn;
}
/**
* Full path to where the actual data for this key is stored.
*
* @return String
*/
public String getDataFileName() {
return dataFileName;
}
/**
 * Sets the data file path for this key.
*
* @param dataFileName - Data File Name
*/
public void setDataFileName(String dataFileName) {
this.dataFileName = dataFileName;
}
/**
* Gets the Keyname of this object.
*
* @return String
*/
public String getKeyName() {
return keyName;
}
/**
* Sets the Key name of this object.
*
* @param keyName - String
*/
public void setKeyName(String keyName) {
this.keyName = keyName;
}
/**
* Returns the MD5 Hash for the data of this key.
*
* @return String MD5
*/
public String getMd5hash() {
return md5hash;
}
/**
* Sets the MD5 of this file.
*
* @param md5hash - Md5 of this file
*/
public void setMd5hash(String md5hash) {
this.md5hash = md5hash;
}
/**
* Number of bytes stored in the data part of this key.
*
* @return long size of the data file
*/
public long getSize() {
return size;
}
/**
* Sets the size of the Data part of this key.
*
* @param size - Size in long
*/
public void setSize(long size) {
this.size = size;
}
/**
* Version of this key.
*
* @return - returns the version of this key.
*/
public long getVersion() {
return version;
}
/**
* Sets the version of this key.
*
* @param version - Version String
*/
public void setVersion(long version) {
this.version = version;
}
/**
* Compares this object with the specified object for order. Returns a
* negative integer, zero, or a positive integer as this object is less
* than, equal to, or greater than the specified object.
*
* @param o the object to be compared.
*
* @return a negative integer, zero, or a positive integer as this object
* is less than, equal to, or greater than the specified object.
*
* @throws NullPointerException if the specified object is null
* @throws ClassCastException if the specified object's type prevents it
* from being compared to this object.
*/
@Override
public int compareTo(KeyInfo o) {
if (this.keyName.compareTo(o.getKeyName()) != 0) {
return this.keyName.compareTo(o.getKeyName());
}
if (this.getVersion() == o.getVersion()) {
return 0;
}
if (this.getVersion() < o.getVersion()) {
return -1;
}
return 1;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
KeyInfo keyInfo = (KeyInfo) o;
return new EqualsBuilder()
.append(version, keyInfo.version)
.append(keyName, keyInfo.keyName)
.isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder(17, 37)
.append(version)
.append(keyName)
.toHashCode();
}
/**
 * Parse a JSON string to return a KeyInfo object.
 *
 * @param jsonString - Json String
 *
 * @return - KeyInfo
*
* @throws IOException
*/
public static KeyInfo parse(String jsonString) throws IOException {
return READER.readValue(jsonString);
}
/**
 * Returns a JSON string of this object, after stripping out
 * dataFileName.
*
* @return String
*/
public String toJsonString() throws IOException {
return WRITER.writeValueAsString(this);
}
/**
* Returns the Object as a Json String.
*/
public String toDBString() throws IOException {
return JsonUtils.toJsonString(this);
}
}
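
A similar sketch for keys, assuming the same round-trip behavior as
BucketInfo (the key name and data file path are made up).

import org.apache.hadoop.ozone.web.response.KeyInfo;

public class KeyInfoSketch {
  public static void main(String[] args) throws Exception {
    KeyInfo key = new KeyInfo();
    key.setKeyName("logs/0001");
    key.setVersion(1);
    key.setSize(4096);
    key.setDataFileName("/data/container-12/chunk-0");
    // REST-facing JSON drops dataFileName; the DB string keeps it.
    String restJson = key.toJsonString();
    KeyInfo parsed = KeyInfo.parse(key.toDBString());
    // Ordering is by key name first, then by version.
    System.out.println(key.compareTo(parsed)); // 0
  }
}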

View File

@ -1,80 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.response;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import java.util.List;
/**
* Represents an Ozone key Object with detail information of location.
*/
public class KeyInfoDetails extends KeyInfo {
/**
 * A list of KeyLocation entries (containerID and localID)
 * that specify replica locations.
*/
private List<KeyLocation> keyLocations;
/**
* Set details of key location.
*
* @param keyLocations - details of key location
*/
public void setKeyLocations(List<KeyLocation> keyLocations) {
this.keyLocations = keyLocations;
}
/**
* Returns details of key location.
*
 * @return keyLocations
*/
public List<KeyLocation> getKeyLocations() {
return keyLocations;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
KeyInfoDetails that = (KeyInfoDetails) o;
return new EqualsBuilder()
.append(getVersion(), that.getVersion())
.append(getKeyName(), that.getKeyName())
.append(keyLocations, that.getKeyLocations())
.isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder(17, 37)
.append(getVersion())
.append(getKeyName())
.append(keyLocations)
.toHashCode();
}
}

View File

@ -1,82 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.response;
/**
 * KeyLocation class is used for parsing the JSON response
 * when a KeyInfoDetails call is made.
*/
public class KeyLocation {
/**
 * Which container this key is stored in.
*/
private final long containerID;
/**
 * Which block inside the container this key is stored in.
*/
private final long localID;
/**
* Data length of this key replica.
*/
private final long length;
/**
* Offset of this key.
*/
private final long offset;
/**
* Constructs KeyLocation.
*/
public KeyLocation(long containerID, long localID,
long length, long offset) {
this.containerID = containerID;
this.localID = localID;
this.length = length;
this.offset = offset;
}
/**
* Returns the containerID of this Key.
*/
public long getContainerID() {
return containerID;
}
/**
* Returns the localID of this Key.
*/
public long getLocalID() {
return localID;
}
/**
* Returns the length of this Key.
*/
public long getLength() {
return length;
}
/**
* Returns the offset of this Key.
*/
public long getOffset() {
return offset;
}
}
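
An illustrative sketch wiring a KeyLocation into KeyInfoDetails (all
IDs, lengths and offsets here are made-up values).

import java.util.Collections;

import org.apache.hadoop.ozone.web.response.KeyInfoDetails;
import org.apache.hadoop.ozone.web.response.KeyLocation;

public class KeyLocationSketch {
  public static void main(String[] args) {
    // One replica location: container 12, local block 101,
    // 4096 bytes starting at offset 0.
    KeyLocation location = new KeyLocation(12L, 101L, 4096L, 0L);
    KeyInfoDetails details = new KeyInfoDetails();
    details.setKeyName("logs/0001");
    details.setVersion(1);
    details.setKeyLocations(Collections.singletonList(location));
  }
}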

View File

@ -1,154 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.response;
import java.io.IOException;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.ozone.web.utils.JsonUtils;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonFilter;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.databind.ser.FilterProvider;
import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
/**
 * ListBuckets is the response for the ListBuckets query.
*/
public class ListBuckets {
static final String BUCKET_LIST = "BUCKET_LIST_FILTER";
private static final ObjectReader READER =
new ObjectMapper().readerFor(ListBuckets.class);
private static final ObjectWriter WRITER;
static {
ObjectMapper mapper = new ObjectMapper();
String[] ignorableFieldNames = {"dataFileName"};
FilterProvider filters = new SimpleFilterProvider()
.addFilter(BUCKET_LIST, SimpleBeanPropertyFilter
.serializeAllExcept(ignorableFieldNames));
mapper.setVisibility(PropertyAccessor.FIELD,
JsonAutoDetect.Visibility.ANY);
mapper.addMixIn(Object.class, MixIn.class);
mapper.setFilterProvider(filters);
WRITER = mapper.writerWithDefaultPrettyPrinter();
}
private List<BucketInfo> buckets;
/**
* Constructor for ListBuckets.
* @param buckets - List of buckets owned by this user
*/
public ListBuckets(List<BucketInfo> buckets) {
this.buckets = buckets;
}
/**
* Constructor for ListBuckets.
*/
public ListBuckets() {
this.buckets = new LinkedList<BucketInfo>();
}
/**
* Parses a String to return ListBuckets object.
*
* @param data - Json String
*
* @return - ListBuckets
*
* @throws IOException
*/
public static ListBuckets parse(String data) throws IOException {
return READER.readValue(data);
}
/**
* Returns a list of Buckets.
*
* @return Bucket list
*/
public List<BucketInfo> getBuckets() {
return buckets;
}
/**
* Sets the list of buckets owned by this user.
*
* @param buckets - List of Buckets
*/
public void setBuckets(List<BucketInfo> buckets) {
this.buckets = buckets;
}
/**
 * Returns a JSON string of this object, after stripping out
 * bytesUsed and keyCount.
*
* @return String
*/
public String toJsonString() throws IOException {
return WRITER.writeValueAsString(this);
}
/**
* Returns the Object as a Json String.
*/
public String toDBString() throws IOException {
return JsonUtils.toJsonString(this);
}
/**
* Sorts the buckets based on bucketName.
 * This is useful when we return the list of buckets.
*/
public void sort() {
Collections.sort(buckets);
}
/**
* Add a new bucket to the list of buckets.
* @param bucketInfo - bucket Info
*/
  public void addBucket(BucketInfo bucketInfo) {
this.buckets.add(bucketInfo);
}
/**
* This class allows us to create custom filters
* for the Json serialization.
*/
@JsonFilter(BUCKET_LIST)
class MixIn {
}
}
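
A small usage sketch (bucket names are illustrative). Note that sort()
relies on every entry belonging to the same volume, since
BucketInfo.compareTo asserts matching volume names.

import org.apache.hadoop.ozone.web.response.BucketInfo;
import org.apache.hadoop.ozone.web.response.ListBuckets;

public class ListBucketsSketch {
  public static void main(String[] args) throws Exception {
    ListBuckets list = new ListBuckets();
    list.addBucket(new BucketInfo("vol1", "beta"));
    list.addBucket(new BucketInfo("vol1", "alpha"));
    list.sort(); // orders by bucket name: alpha, beta
    String json = list.toJsonString();
    System.out.println(json);
  }
}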

View File

@ -1,209 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.response;
import java.io.IOException;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.ListArgs;
import org.apache.hadoop.ozone.web.utils.JsonUtils;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonFilter;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.databind.ser.FilterProvider;
import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
import com.google.common.base.Preconditions;
/**
 * This class represents the list of keys (objects) in a bucket.
*/
public class ListKeys {
static final String OBJECT_LIST = "OBJECT_LIST_FILTER";
private static final ObjectReader READER =
new ObjectMapper().readerFor(ListKeys.class);
private static final ObjectWriter WRITER;
static {
ObjectMapper mapper = new ObjectMapper();
String[] ignorableFieldNames = {"dataFileName"};
FilterProvider filters = new SimpleFilterProvider()
.addFilter(OBJECT_LIST, SimpleBeanPropertyFilter
.serializeAllExcept(ignorableFieldNames));
mapper.setVisibility(PropertyAccessor.FIELD,
JsonAutoDetect.Visibility.ANY);
mapper.addMixIn(Object.class, MixIn.class);
mapper.setFilterProvider(filters);
WRITER = mapper.writerWithDefaultPrettyPrinter();
}
private String name;
private String prefix;
private long maxKeys;
private boolean truncated;
private List<KeyInfo> keyList;
/**
* Default constructor needed for json serialization.
*/
public ListKeys() {
this.keyList = new LinkedList<>();
}
/**
* Constructor for ListKeys.
*
* @param args ListArgs
* @param truncated is truncated
*/
public ListKeys(ListArgs args, boolean truncated) {
Preconditions.checkState(args.getArgs() instanceof BucketArgs);
this.name = ((BucketArgs) args.getArgs()).getBucketName();
this.prefix = args.getPrefix();
this.maxKeys = args.getMaxKeys();
this.truncated = truncated;
}
/**
* Converts a Json string to POJO.
* @param jsonString - json string.
* @return ListObject
* @throws IOException - Json conversion error.
*/
public static ListKeys parse(String jsonString) throws IOException {
return READER.readValue(jsonString);
}
/**
* Returns a list of Objects.
*
* @return List of KeyInfo Objects.
*/
public List<KeyInfo> getKeyList() {
return keyList;
}
/**
* Sets the list of Objects.
*
* @param objectList - List of Keys
*/
public void setKeyList(List<KeyInfo> objectList) {
this.keyList = objectList;
}
/**
* Gets the Max Key Count.
*
* @return long
*/
public long getMaxKeys() {
return maxKeys;
}
/**
* Gets bucket Name.
*
* @return String
*/
public String getName() {
return name;
}
/**
* Gets Prefix.
*
* @return String
*/
public String getPrefix() {
return prefix;
}
/**
* Gets truncated Status.
*
* @return Boolean
*/
public boolean isTruncated() {
return truncated;
}
/**
* Sets the value of truncated.
*
* @param value - Boolean
*/
public void setTruncated(boolean value) {
this.truncated = value;
}
/**
 * Returns a JSON string of this object, after stripping out
 * dataFileName.
*
* @return String
* @throws IOException - On json Errors.
*/
public String toJsonString() throws IOException {
return WRITER.writeValueAsString(this);
}
/**
* Returns the Object as a Json String.
*
* @return String
* @throws IOException - on json errors.
*/
public String toDBString() throws IOException {
return JsonUtils.toJsonString(this);
}
/**
* Sorts the keys based on name and version. This is useful when we return the
* list of keys.
*/
public void sort() {
Collections.sort(keyList);
}
/**
* Add a new key to the list of keys.
* @param keyInfo - key Info
*/
  public void addKey(KeyInfo keyInfo) {
this.keyList.add(keyInfo);
}
/**
* This class allows us to create custom filters for the Json serialization.
*/
@JsonFilter(OBJECT_LIST)
class MixIn {
}
}
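
A matching sketch for a truncated key listing (the key name and size
are illustrative).

import org.apache.hadoop.ozone.web.response.KeyInfo;
import org.apache.hadoop.ozone.web.response.ListKeys;

public class ListKeysSketch {
  public static void main(String[] args) throws Exception {
    ListKeys listKeys = new ListKeys();
    KeyInfo key = new KeyInfo();
    key.setKeyName("logs/0001");
    key.setSize(4096);
    listKeys.addKey(key);
    // Signal that more keys remain beyond this page.
    listKeys.setTruncated(true);
    String json = listKeys.toJsonString();
    System.out.println(json);
  }
}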

View File

@ -1,152 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.response;
import java.io.IOException;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ozone.web.utils.JsonUtils;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonFilter;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.databind.ser.FilterProvider;
import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
/**
 * ListVolumes is the class that is returned in JSON format to
 * users when they call ListVolumes.
*/
@InterfaceAudience.Private
public class ListVolumes {
private List<VolumeInfo> volumes;
static final String VOLUME_LIST = "VOLUME_LIST_FILTER";
private static final ObjectReader READER =
new ObjectMapper().readerFor(ListVolumes.class);
private static final ObjectWriter WRITER;
static {
ObjectMapper mapper = new ObjectMapper();
String[] ignorableFieldNames = {"bytesUsed", "bucketCount"};
FilterProvider filters = new SimpleFilterProvider()
.addFilter(VOLUME_LIST, SimpleBeanPropertyFilter
.serializeAllExcept(ignorableFieldNames));
mapper.setVisibility(PropertyAccessor.FIELD,
JsonAutoDetect.Visibility.ANY);
mapper.addMixIn(Object.class, MixIn.class);
mapper.setFilterProvider(filters);
WRITER = mapper.writerWithDefaultPrettyPrinter();
}
/**
* Used for json filtering.
*/
@JsonFilter(VOLUME_LIST)
class MixIn {
}
/**
* Constructs ListVolume objects.
*/
public ListVolumes() {
this.volumes = new LinkedList<VolumeInfo>();
}
/**
* Gets the list of volumes.
*
* @return List of VolumeInfo Objects
*/
public List<VolumeInfo> getVolumes() {
return volumes;
}
/**
* Sets volume info.
*
* @param volumes - List of Volumes
*/
public void setVolumes(List<VolumeInfo> volumes) {
this.volumes = volumes;
}
/**
 * Returns a JSON string of this object, after stripping out
 * bytesUsed and bucketCount.
*
* @return String
*/
public String toJsonString() throws IOException {
return WRITER.writeValueAsString(this);
}
/**
 * When we serialize a VolumeInfo to our database
 * we will use all fields. However, the toJsonString
 * will strip out bytesUsed and bucketCount from the
 * VolumeInfo.
*
* @return Json String
*
* @throws IOException
*/
public String toDBString() throws IOException {
return JsonUtils.toJsonString(this);
}
/**
* Parses a String to return ListVolumes object.
*
* @param data - Json String
*
* @return - ListVolumes
*
* @throws IOException
*/
public static ListVolumes parse(String data) throws IOException {
return READER.readValue(data);
}
/**
* Adds a new volume info to the List.
*
* @param info - VolumeInfo
*/
public void addVolume(VolumeInfo info) {
this.volumes.add(info);
}
/**
 * Sorts the volumes based on volume name.
 * This is useful when we return the list of volumes.
*/
public void sort() {
Collections.sort(volumes);
}
}

View File

@ -1,308 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.response;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ozone.web.request.OzoneQuota;
import org.apache.hadoop.ozone.web.utils.JsonUtils;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonFilter;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.databind.ser.FilterProvider;
import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
/**
 * VolumeInfo is the Java class that represents the
 * JSON returned when a VolumeInfo call is made.
*/
@InterfaceAudience.Private
public class VolumeInfo implements Comparable<VolumeInfo> {
static final String VOLUME_INFO = "VOLUME_INFO_FILTER";
private static final ObjectReader READER =
new ObjectMapper().readerFor(VolumeInfo.class);
private static final ObjectWriter WRITER;
static {
ObjectMapper mapper = new ObjectMapper();
String[] ignorableFieldNames = {"bytesUsed", "bucketCount"};
FilterProvider filters = new SimpleFilterProvider()
.addFilter(VOLUME_INFO, SimpleBeanPropertyFilter
.serializeAllExcept(ignorableFieldNames));
mapper.setVisibility(PropertyAccessor.FIELD,
JsonAutoDetect.Visibility.ANY);
mapper.addMixIn(Object.class, MixIn.class);
mapper.setFilterProvider(filters);
WRITER = mapper.writerWithDefaultPrettyPrinter();
}
/**
* Custom Json Filter Class.
*/
@JsonFilter(VOLUME_INFO)
class MixIn {
}
private VolumeOwner owner;
private OzoneQuota quota;
private String volumeName;
private String createdOn;
private String createdBy;
private long bytesUsed;
private long bucketCount;
/**
* Constructor for VolumeInfo.
*
* @param volumeName - Name of the Volume
 * @param createdOn - Date String
* @param createdBy - Person who created it
*/
public VolumeInfo(String volumeName, String createdOn, String createdBy) {
this.createdOn = createdOn;
this.volumeName = volumeName;
this.createdBy = createdBy;
}
/**
* Constructor for VolumeInfo.
*/
public VolumeInfo() {
}
/**
* Returns the name of the person who created this volume.
*
* @return Name of Admin who created this
*/
public String getCreatedBy() {
return createdBy;
}
/**
* Sets the user name of the person who created this volume.
*
* @param createdBy - UserName
*/
public void setCreatedBy(String createdBy) {
this.createdBy = createdBy;
}
/**
* Gets the date on which this volume was created.
*
* @return - Date String
*/
public String getCreatedOn() {
return createdOn;
}
/**
* Sets the date string.
*
* @param createdOn - Date String
*/
public void setCreatedOn(String createdOn) {
this.createdOn = createdOn;
}
/**
* Returns the owner info.
*
* @return - OwnerInfo
*/
public VolumeOwner getOwner() {
return owner;
}
/**
* Sets the owner.
*
* @param owner - OwnerInfo
*/
public void setOwner(VolumeOwner owner) {
this.owner = owner;
}
/**
* Returns the quota information on a volume.
*
* @return Quota
*/
public OzoneQuota getQuota() {
return quota;
}
/**
* Sets the quota info.
*
* @param quota - Quota Info
*/
public void setQuota(OzoneQuota quota) {
this.quota = quota;
}
/**
 * Gets the volume name.
*
* @return - Volume Name
*/
public String getVolumeName() {
return volumeName;
}
/**
* Sets the volume name.
*
* @param volumeName - Volume Name
*/
public void setVolumeName(String volumeName) {
this.volumeName = volumeName;
}
/**
 * Returns a JSON string of this object, after stripping out
 * bytesUsed and bucketCount.
*
* @return String - json string
* @throws IOException
*/
public String toJsonString() throws IOException {
return WRITER.writeValueAsString(this);
}
/**
 * When we serialize a VolumeInfo to our database
 * we will use all fields. However, the toJsonString
 * will strip out bytesUsed and bucketCount from the
 * VolumeInfo.
*
* @return Json String
*
* @throws IOException
*/
public String toDBString() throws IOException {
return JsonUtils.toJsonString(this);
}
/**
* Comparable Interface.
* @param o VolumeInfo Object.
* @return Result of comparison
*/
@Override
public int compareTo(VolumeInfo o) {
return this.volumeName.compareTo(o.getVolumeName());
}
/**
* Gets the number of bytesUsed by this volume.
*
* @return long - Bytes used
*/
public long getBytesUsed() {
return bytesUsed;
}
/**
* Sets number of bytesUsed by this volume.
*
* @param bytesUsed - Number of bytesUsed
*/
public void setBytesUsed(long bytesUsed) {
this.bytesUsed = bytesUsed;
}
/**
* Returns VolumeInfo class from json string.
*
* @param data - Json String
*
* @return VolumeInfo
*
* @throws IOException
*/
public static VolumeInfo parse(String data) throws IOException {
return READER.readValue(data);
}
/**
* Indicates whether some other object is "equal to" this one.
*
* @param obj the reference object with which to compare.
*
* @return {@code true} if this object is the same as the obj
* argument; {@code false} otherwise.
*/
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
VolumeInfo otherInfo = (VolumeInfo) obj;
return otherInfo.getVolumeName().equals(this.getVolumeName());
}
/**
* Returns a hash code value for the object. This method is
* supported for the benefit of hash tables such as those provided by
* HashMap.
* @return a hash code value for this object.
*
* @see Object#equals(Object)
* @see System#identityHashCode
*/
@Override
public int hashCode() {
return getVolumeName().hashCode();
}
/**
* Total number of buckets under this volume.
*
* @return - bucketCount
*/
public long getBucketCount() {
return bucketCount;
}
/**
* Sets the buckets count.
*
* @param bucketCount - Bucket Count
*/
public void setBucketCount(long bucketCount) {
this.bucketCount = bucketCount;
}
}
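
A closing sketch showing the two serialized forms of a volume (the
date string, owner and quota values are illustrative).

import org.apache.hadoop.ozone.web.request.OzoneQuota;
import org.apache.hadoop.ozone.web.response.VolumeInfo;
import org.apache.hadoop.ozone.web.response.VolumeOwner;

public class VolumeInfoSketch {
  public static void main(String[] args) throws Exception {
    VolumeInfo volume =
        new VolumeInfo("vol1", "Thu, 01 Jan 1970 00:00:00 GMT", "hdfs");
    volume.setOwner(new VolumeOwner("bilbo"));
    volume.setQuota(OzoneQuota.parseQuota("100TB"));
    volume.setBytesUsed(1024);
    // REST-facing JSON drops bytesUsed and bucketCount via VOLUME_INFO;
    // the DB string keeps every field.
    String restJson = volume.toJsonString();
    String dbJson = volume.toDBString();
    System.out.println(restJson);
  }
}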

View File

@ -1,60 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.response;
import com.fasterxml.jackson.annotation.JsonInclude;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Volume Owner represents the owner of a volume.
*
* This is a class instead of a string since we might need to extend this class
* to support other forms of authentication.
*/
@InterfaceAudience.Private
public class VolumeOwner {
@JsonInclude(JsonInclude.Include.NON_NULL)
private String name;
/**
* Constructor for VolumeOwner.
*
* @param name - name of the User
*/
public VolumeOwner(String name) {
this.name = name;
}
/**
* Constructs Volume Owner.
*/
public VolumeOwner() {
name = null;
}
/**
* Returns the user name.
*
* @return Name
*/
public String getName() {
return name;
}
}

View File

@ -1,23 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Response classes for the Ozone web protocol.
*/
package org.apache.hadoop.ozone.web.response;

View File

@ -1,82 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.web.response.BucketInfo;
import org.junit.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
/**
* Test Ozone Bucket Info operation.
*/
public class TestBucketInfo {
@Test
public void testBucketInfoJson() throws IOException {
BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
String bucketInfoString = bucketInfo.toJsonString();
BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
assertEquals(bucketInfo, newBucketInfo);
}
@Test
public void testBucketInfoDBString() throws IOException {
BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
String bucketInfoString = bucketInfo.toDBString();
BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
assertEquals(bucketInfo, newBucketInfo);
}
@Test
public void testBucketInfoAddAcls() throws IOException {
BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
String bucketInfoString = bucketInfo.toDBString();
BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
assertEquals(bucketInfo, newBucketInfo);
List<OzoneAcl> aclList = new ArrayList<>();
aclList.add(OzoneAcl.parseAcl("user:bilbo:r"));
aclList.add(OzoneAcl.parseAcl("user:samwise:rw"));
newBucketInfo.setAcls(aclList);
assertNotNull(newBucketInfo.getAcls());
assertEquals(2, newBucketInfo.getAcls().size());
}
@Test
public void testBucketInfoVersionAndType() throws IOException {
BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
bucketInfo.setVersioning(OzoneConsts.Versioning.ENABLED);
bucketInfo.setStorageType(StorageType.DISK);
String bucketInfoString = bucketInfo.toDBString();
BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
assertEquals(bucketInfo, newBucketInfo);
}
}

View File

@ -1,115 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.ozone.web;
import org.apache.hadoop.ozone.web.request.OzoneQuota;
import org.junit.Test;
import java.util.HashMap;
import java.util.Set;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* Test Ozone Volume Quota.
*/
public class TestQuota {
@Test
public void testParseQuota() {
HashMap<String, Boolean> testMatrix;
testMatrix = new HashMap<String, Boolean>();
testMatrix.put("10TB", Boolean.TRUE);
testMatrix.put("1 TB", Boolean.TRUE);
testMatrix.put("0MB", Boolean.TRUE);
testMatrix.put("0 TB", Boolean.TRUE);
testMatrix.put(" 1000MB ", Boolean.TRUE);
testMatrix.put(" 1000MBMB ", Boolean.FALSE);
testMatrix.put(" 1000MB00 ", Boolean.FALSE);
testMatrix.put("1000ZMB", Boolean.FALSE);
testMatrix.put("MB1000", Boolean.FALSE);
testMatrix.put("9999", Boolean.FALSE);
testMatrix.put("1", Boolean.FALSE);
testMatrix.put("remove", Boolean.FALSE);
testMatrix.put("1UNDEFINED", Boolean.FALSE);
testMatrix.put(null, Boolean.FALSE);
testMatrix.put("", Boolean.FALSE);
testMatrix.put("-1000MB", Boolean.FALSE);
testMatrix.put("1024 bytes", Boolean.TRUE);
testMatrix.put("1bytes", Boolean.TRUE);
testMatrix.put("0bytes", Boolean.TRUE);
testMatrix.put("10000 BYTES", Boolean.TRUE);
testMatrix.put("BYTESbytes", Boolean.FALSE);
testMatrix.put("bytes", Boolean.FALSE);
Set<String> keys = testMatrix.keySet();
for (String key : keys) {
if (testMatrix.get(key)) {
OzoneQuota.parseQuota(key);
} else {
try {
OzoneQuota.parseQuota(key);
// should never get here since parseQuota will throw
fail("An exception was expected for quota string: " + key);
} catch (IllegalArgumentException e) {
}
}
}
}
@Test
public void testVerifyQuota() {
OzoneQuota qt = OzoneQuota.parseQuota("10TB");
assertEquals(10, qt.getSize());
assertEquals(OzoneQuota.Units.TB, qt.getUnit());
assertEquals(10L * (1024L * 1024L * 1024L * 1024L), qt.sizeInBytes());
qt = OzoneQuota.parseQuota("10MB");
assertEquals(10, qt.getSize());
assertEquals(OzoneQuota.Units.MB, qt.getUnit());
assertEquals(10L * (1024L * 1024L), qt.sizeInBytes());
qt = OzoneQuota.parseQuota("10GB");
assertEquals(10, qt.getSize());
assertEquals(OzoneQuota.Units.GB, qt.getUnit());
assertEquals(10L * (1024L * 1024L * 1024L), qt.sizeInBytes());
qt = OzoneQuota.parseQuota("10BYTES");
assertEquals(10, qt.getSize());
assertEquals(OzoneQuota.Units.BYTES, qt.getUnit());
assertEquals(10L, qt.sizeInBytes());
OzoneQuota emptyQuota = new OzoneQuota();
assertEquals(-1L, emptyQuota.sizeInBytes());
assertEquals(0, emptyQuota.getSize());
assertEquals(OzoneQuota.Units.UNDEFINED, emptyQuota.getUnit());
}
@Test
public void testVerifyRemove() {
assertTrue(OzoneQuota.isRemove("remove"));
assertFalse(OzoneQuota.isRemove("not remove"));
assertFalse(OzoneQuota.isRemove(null));
}
}
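The sizeInBytes assertions above follow plain binary multipliers (1 TB = 1024^4 bytes), with -1 for an undefined quota. A simplified sketch of that arithmetic, not the actual OzoneQuota implementation:
/** Simplified sketch of the quota-to-bytes arithmetic; illustrative only. */
public final class QuotaMathSketch {
  enum Units { UNDEFINED, BYTES, MB, GB, TB }
  // Mirrors the assertions above: binary multipliers, -1 when undefined.
  static long sizeInBytes(long size, Units unit) {
    switch (unit) {
    case BYTES:
      return size;
    case MB:
      return size * 1024L * 1024L;
    case GB:
      return size * 1024L * 1024L * 1024L;
    case TB:
      return size * 1024L * 1024L * 1024L * 1024L;
    default:
      return -1L; // UNDEFINED quota
    }
  }
  public static void main(String[] args) {
    System.out.println(sizeInBytes(10, Units.TB)); // 10995116277760
  }
}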

View File

@ -1,101 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web;
import org.junit.Test;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Set;
import static org.apache.hadoop.ozone.web.utils.OzoneUtils.getRequestID;
import static org.apache.hadoop.ozone.web.utils.OzoneUtils.verifyResourceName;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* Test Ozone Utility operations like verifying resource name.
*/
public class TestUtils {
/**
* Tests if the bucket name handling is correct.
*/
@Test
public void testValidBucketNames() {
HashMap<String, Boolean> testMatrix;
// Init the Table with Strings and Expected Return values
testMatrix = new HashMap<String, Boolean>();
testMatrix.put("bucket-.ozone.self", Boolean.FALSE);
testMatrix.put("bucket.-ozone.self", Boolean.FALSE);
testMatrix.put(".bucket.ozone.self", Boolean.FALSE);
testMatrix.put("bucket.ozone.self.", Boolean.FALSE);
testMatrix.put("bucket..ozone.self", Boolean.FALSE);
testMatrix.put("192.1.1.1", Boolean.FALSE);
testMatrix.put("ab", Boolean.FALSE);
testMatrix.put("bucket.ozone.self.this.is.a.really.long.name.that."
+ "is.more.than.sixty.three.characters.long.for.sure", Boolean.FALSE);
testMatrix.put(null, Boolean.FALSE);
testMatrix.put("bucket@$", Boolean.FALSE);
testMatrix.put("BUCKET", Boolean.FALSE);
testMatrix.put("bucket .ozone.self", Boolean.FALSE);
testMatrix.put(" bucket.ozone.self", Boolean.FALSE);
testMatrix.put("bucket.ozone.self-", Boolean.FALSE);
testMatrix.put("-bucket.ozone.self", Boolean.FALSE);
testMatrix.put("bucket", Boolean.TRUE);
testMatrix.put("bucket.ozone.self", Boolean.TRUE);
testMatrix.put("bucket.ozone.self", Boolean.TRUE);
testMatrix.put("bucket-name.ozone.self", Boolean.TRUE);
testMatrix.put("bucket.1.ozone.self", Boolean.TRUE);
Set<String> keys = testMatrix.keySet();
for (String key : keys) {
if (testMatrix.get(key)) {
// For valid names there should be no exceptions at all
verifyResourceName(key);
} else {
try {
verifyResourceName(key);
// should never get here since verifyResourceName will throw
fail("An exception was expected but did not happen.");
} catch (IllegalArgumentException e) {
}
}
}
}
/**
* Calls getRequestID many times and asserts that we
* get different values each time. Ideally this should be
* run under parallel threads, but since the function under
* test has no external dependencies, this single-threaded
* test is assumed to be good enough.
*/
@Test
public void testRequestIDisRandom() {
HashSet<String> set = new HashSet<>();
for (int i = 0; i < 1000; i++) {
assertTrue(set.add(getRequestID()));
}
}
}
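The matrix above encodes DNS-label-style naming rules: lowercase alphanumerics plus dots and dashes, 3 to 63 characters, no leading, trailing, or doubled separators, and no IPv4-lookalike names. The following simplified validator passes the same matrix; it is a sketch, not the actual OzoneUtils.verifyResourceName implementation:
import java.util.regex.Pattern;
/** Simplified DNS-style name validator; illustrative only. */
public final class ResourceNameSketch {
  private static final Pattern IPV4_LIKE =
      Pattern.compile("^\\d+\\.\\d+\\.\\d+\\.\\d+$");
  static void verifyResourceName(String name) {
    if (name == null || name.length() < 3 || name.length() > 63) {
      throw new IllegalArgumentException("Bad length: " + name);
    }
    if (!name.matches("[a-z0-9.-]+")) {
      throw new IllegalArgumentException("Illegal characters: " + name);
    }
    if (name.startsWith(".") || name.startsWith("-")
        || name.endsWith(".") || name.endsWith("-")
        || name.contains("..") || name.contains(".-") || name.contains("-.")) {
      throw new IllegalArgumentException("Bad separator placement: " + name);
    }
    if (IPV4_LIKE.matcher(name).matches()) {
      throw new IllegalArgumentException("Looks like an IP address: " + name);
    }
  }
  public static void main(String[] args) {
    verifyResourceName("bucket.ozone.self");          // accepted
    try {
      verifyResourceName("192.1.1.1");                // rejected
    } catch (IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}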

View File

@ -1,73 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web;
import org.apache.hadoop.ozone.web.response.ListVolumes;
import org.apache.hadoop.ozone.web.response.VolumeInfo;
import org.apache.hadoop.ozone.web.response.VolumeOwner;
import org.junit.Test;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
/**
* Test Ozone Volume info structure.
*/
public class TestVolumeStructs {
@Test
public void testVolumeInfoParse() throws IOException {
VolumeInfo volInfo =
new VolumeInfo("testvol", "Thu, Apr 9, 2015 10:23:45 GMT", "gandalf");
VolumeOwner owner = new VolumeOwner("bilbo");
volInfo.setOwner(owner);
String jString = volInfo.toJsonString();
VolumeInfo newVolInfo = VolumeInfo.parse(jString);
assertEquals(volInfo.toJsonString(), newVolInfo.toJsonString());
}
@Test
public void testVolumeInfoValue() throws IOException {
String createdOn = "Thu, Apr 9, 2015 10:23:45 GMT";
String createdBy = "gandalf";
VolumeInfo volInfo = new VolumeInfo("testvol", createdOn, createdBy);
assertEquals(volInfo.getCreatedBy(), createdBy);
assertEquals(volInfo.getCreatedOn(), createdOn);
}
@Test
public void testVolumeListParse() throws IOException {
ListVolumes list = new ListVolumes();
for (int x = 0; x < 100; x++) {
VolumeInfo volInfo = new VolumeInfo("testvol" + Integer.toString(x),
"Thu, Apr 9, 2015 10:23:45 GMT", "gandalf");
list.addVolume(volInfo);
}
list.sort();
String listString = list.toJsonString();
ListVolumes newList = ListVolumes.parse(listString);
assertEquals(list.toJsonString(), newList.toJsonString());
}
}

View File

@ -47,10 +47,6 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdds-container-service</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-objectstore-service</artifactId>
</dependency>
<dependency>
<groupId>com.sun.xml.bind</groupId>
<artifactId>jaxb-impl</artifactId>

View File

@ -112,10 +112,6 @@ run cp "${ROOT}/hadoop-ozone/common/src/main/bin/stop-ozone.sh" "sbin/"
# fault injection tests
run cp -r "${ROOT}/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade" tests
#shaded datanode service
run mkdir -p "./share/hadoop/ozoneplugin"
run cp "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}-plugin.jar" "./share/hadoop/ozoneplugin/hadoop-ozone-datanode-plugin-${HDDS_VERSION}.jar"
# Optional documentation, could be missing
cp -r "${ROOT}/hadoop-hdds/docs/target/classes/docs" ./

View File

@ -19,27 +19,34 @@ Library OperatingSystem
Resource ../commonlib.robot
Test Setup Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab
Test Timeout 2 minute
Suite Setup Generate prefix
*** Variables ***
${prefix} generated
*** Keywords ***
Generate prefix
${random} = Generate Random String 5 [NUMBERS]
Set Suite Variable ${prefix} ${random}
*** Test Cases ***
RpcClient with port
Test ozone shell o3:// om:9862 rpcwoport
Test ozone shell o3:// om:9862 ${prefix}-rpcwoport
RpcClient volume acls
Test Volume Acls o3:// om:9862 rpcwoport2
Test Volume Acls o3:// om:9862 ${prefix}-rpcwoport2
RpcClient bucket acls
Test Bucket Acls o3:// om:9862 rpcwoport2
Test Bucket Acls o3:// om:9862 ${prefix}-rpcwoport2
RpcClient key acls
Test Key Acls o3:// om:9862 rpcwoport2
Test Key Acls o3:// om:9862 ${prefix}-rpcwoport2
RpcClient without host
Test ozone shell o3:// ${EMPTY} rpcwport
Test ozone shell o3:// ${EMPTY} ${prefix}-rpcwport
RpcClient without scheme
Test ozone shell ${EMPTY} ${EMPTY} rpcwoscheme
Test ozone shell ${EMPTY} ${EMPTY} ${prefix}-rpcwoscheme
*** Keywords ***
@ -48,20 +55,20 @@ Test ozone shell
${result} = Execute ozone sh volume create ${protocol}${server}/${volume} --quota 100TB
Should not contain ${result} Failed
Should contain ${result} Creating Volume: ${volume}
${result} = Execute ozone sh volume list ${protocol}${server}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")'
Should contain ${result} createdOn
${result} = Execute ozone sh volume list | grep -Ev 'Removed|DEBUG|ERROR|INFO|TRACE|WARN' | jq -r '.[] | select(.volumeName=="${volume}")'
Should contain ${result} createdOn
${result} = Execute ozone sh volume list ${protocol}${server}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="${volume}")'
Should contain ${result} creationTime
${result} = Execute ozone sh volume list | grep -Ev 'Removed|DEBUG|ERROR|INFO|TRACE|WARN' | jq -r '. | select(.name=="${volume}")'
Should contain ${result} creationTime
# TODO: Disable updating the owner, acls should be used to give access to other user.
Execute ozone sh volume update ${protocol}${server}/${volume} --quota 10TB
# ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .owner | .name'
# Should Be Equal ${result} bill
${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .quota | .size'
Should Be Equal ${result} 10
${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="${volume}") | .quota'
Should Be Equal ${result} 10995116277760
Execute ozone sh bucket create ${protocol}${server}/${volume}/bb1
${result} = Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
${result} = Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="bb1") | .storageType'
Should Be Equal ${result} DISK
${result} = Execute ozone sh bucket list ${protocol}${server}/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
${result} = Execute ozone sh bucket list ${protocol}${server}/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="bb1") | .volumeName'
Should Be Equal ${result} ${volume}
Run Keyword Test key handling ${protocol} ${server} ${volume}
Execute ozone sh bucket delete ${protocol}${server}/${volume}/bb1
@ -106,12 +113,12 @@ Test key handling
Execute rm -f NOTICE.txt.1
Execute ozone sh key get ${protocol}${server}/${volume}/bb1/key1 NOTICE.txt.1
Execute ls -l NOTICE.txt.1
${result} = Execute ozone sh key info ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
Should contain ${result} createdOn
${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
${result} = Execute ozone sh key info ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="key1")'
Should contain ${result} creationTime
${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="key1") | .name'
Should Be Equal ${result} key1
Execute ozone sh key rename ${protocol}${server}/${volume}/bb1 key1 key2
${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[].keyName'
${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.name'
Should Be Equal ${result} key2
Execute ozone sh key delete ${protocol}${server}/${volume}/bb1/key2

View File

@ -34,14 +34,14 @@ Check volume from ozonefs
Run ozoneFS tests
Execute ozone fs -mkdir -p o3fs://bucket1.fstest/testdir/deep
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
Should contain ${result} testdir/deep
Execute ozone fs -copyFromLocal NOTICE.txt o3fs://bucket1.fstest/testdir/deep/
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
Should contain ${result} NOTICE.txt
Execute ozone fs -put NOTICE.txt o3fs://bucket1.fstest/testdir/deep/PUTFILE.txt
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
Should contain ${result} PUTFILE.txt
${result} = Execute ozone fs -ls o3fs://bucket1.fstest/testdir/deep/
@ -49,13 +49,13 @@ Run ozoneFS tests
Should contain ${result} PUTFILE.txt
Execute ozone fs -mv o3fs://bucket1.fstest/testdir/deep/NOTICE.txt o3fs://bucket1.fstest/testdir/deep/MOVED.TXT
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
Should contain ${result} MOVED.TXT
Should not contain ${result} NOTICE.txt
Execute ozone fs -mkdir -p o3fs://bucket1.fstest/testdir/deep/subdir1
Execute ozone fs -cp o3fs://bucket1.fstest/testdir/deep/MOVED.TXT o3fs://bucket1.fstest/testdir/deep/subdir1/NOTICE.txt
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
Should contain ${result} subdir1/NOTICE.txt
${result} = Execute ozone fs -ls o3fs://bucket1.fstest/testdir/deep/subdir1/
@ -65,19 +65,19 @@ Run ozoneFS tests
Should not contain ${result} Failed
Execute ozone fs -rm o3fs://bucket1.fstest/testdir/deep/subdir1/NOTICE.txt
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
Should not contain ${result} NOTICE.txt
${result} = Execute ozone fs -rmdir o3fs://bucket1.fstest/testdir/deep/subdir1/
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
Should not contain ${result} subdir1
Execute ozone fs -touch o3fs://bucket1.fstest/testdir/TOUCHFILE.txt
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
Should contain ${result} TOUCHFILE.txt
Execute ozone fs -rm -r o3fs://bucket1.fstest/testdir/
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
Should not contain ${result} testdir
Execute rm -Rf localdir1

View File

@ -48,10 +48,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-objectstore-service</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-s3gateway</artifactId>

View File

@ -137,15 +137,6 @@ public interface MiniOzoneCluster {
*/
OzoneClient getRpcClient() throws IOException;
/**
* Returns a REST-based {@link OzoneClient} to access the
* {@link MiniOzoneCluster}.
*
* @return {@link OzoneClient}
* @throws IOException
*/
OzoneClient getRestClient() throws IOException;
/**
* Returns StorageContainerLocationClient to communicate with
* {@link StorageContainerManager} associated with the MiniOzoneCluster.

View File

@ -36,7 +36,6 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.common.Storage.StorageState;
import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
import org.apache.hadoop.ozone.om.OMConfigKeys;
@ -67,7 +66,6 @@ import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
.HEALTHY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
import static org.apache.hadoop.ozone.OzoneConfigKeys
.DFS_CONTAINER_IPC_PORT;
import static org.apache.hadoop.ozone.OzoneConfigKeys
@ -212,18 +210,6 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
return OzoneClientFactory.getRpcClient(conf);
}
/**
* Creates an {@link OzoneClient} connected to this cluster's REST
* service. Callers take ownership of the client and must close it when done.
*
* @return OzoneRestClient connected to this cluster's REST service
* @throws OzoneException if Ozone encounters an error creating the client
*/
@Override
public OzoneClient getRestClient() throws IOException {
return OzoneClientFactory.getRestClient(conf);
}
/**
* Returns an RPC proxy connected to this cluster's StorageContainerManager
* for accessing container location information. Callers take ownership of
@ -619,8 +605,6 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
private void configureHddsDatanodes() {
conf.set(ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY, "0.0.0.0:0");
conf.set(HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
conf.set(HDDS_DATANODE_PLUGINS_KEY,
"org.apache.hadoop.ozone.web.OzoneHddsDatanodeService");
conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
randomContainerPort);
conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,

View File

@ -18,15 +18,23 @@
package org.apache.hadoop.ozone;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.ratis.RatisHelper;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.client.rpc.RpcClient;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.hdds.ratis.RatisHelper;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.ratis.RatisHelper.newRaftClient;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
import org.apache.ratis.client.RaftClient;
import org.apache.ratis.protocol.RaftPeer;
import org.apache.ratis.rpc.RpcType;
@ -35,16 +43,6 @@ import org.apache.ratis.util.TimeDuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
import static org.apache.hadoop.hdds.ratis.RatisHelper.newRaftClient;
/**
* Helpers for Ratis tests.
*/
@ -80,7 +78,7 @@ public interface RatisTestHelper {
}
public ClientProtocol newOzoneClient()
throws OzoneException, URISyntaxException, IOException {
throws IOException {
return new RpcClient(conf);
}

View File

@ -0,0 +1,95 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.HashMap;
import java.util.Scanner;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.VolumeArgs;
import org.apache.commons.lang3.RandomStringUtils;
/**
* Utility to help generate test data.
*/
public final class TestDataUtil {
private TestDataUtil() {
}
public static OzoneBucket createVolumeAndBucket(MiniOzoneCluster cluster,
String volumeName, String bucketName) throws IOException {
String userName = "user" + RandomStringUtils.randomNumeric(5);
String adminName = "admin" + RandomStringUtils.randomNumeric(5);
OzoneClient client = cluster.getClient();
VolumeArgs volumeArgs = VolumeArgs.newBuilder()
.setAdmin(adminName)
.setOwner(userName)
.build();
ObjectStore objectStore = client.getObjectStore();
objectStore.createVolume(volumeName, volumeArgs);
OzoneVolume volume = objectStore.getVolume(volumeName);
BucketArgs omBucketArgs = BucketArgs.newBuilder()
.setStorageType(StorageType.DISK)
.build();
volume.createBucket(bucketName, omBucketArgs);
return volume.getBucket(bucketName);
}
public static void createKey(OzoneBucket bucket, String keyName,
String content) throws IOException {
try (OutputStream stream = bucket
.createKey(keyName, content.length(), ReplicationType.STAND_ALONE,
ReplicationFactor.ONE, new HashMap<>())) {
stream.write(content.getBytes());
}
}
public static String getKey(OzoneBucket bucket, String keyName)
throws IOException {
try (InputStream stream = bucket.readKey(keyName)) {
return new Scanner(stream).useDelimiter("\\A").next();
}
}
public static OzoneBucket createVolumeAndBucket(MiniOzoneCluster cluster)
throws IOException {
String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
return createVolumeAndBucket(cluster, volumeName, bucketName);
}
}
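A typical caller of the new utility looks roughly like the sketch below. It assumes an already-started MiniOzoneCluster; the wrapper class and method names are illustrative:
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;
/** Illustrative usage of the new helper; assumes a started cluster. */
public final class TestDataUtilUsageSketch {
  static void roundTrip(MiniOzoneCluster cluster) throws Exception {
    // The helper generates random volume/bucket names itself.
    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
    TestDataUtil.createKey(bucket, "key1", "sample-content");
    String back = TestDataUtil.getKey(bucket, "key1");
    if (!"sample-content".equals(back)) {
      throw new AssertionError("unexpected key content: " + back);
    }
  }
}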

View File

@ -1,413 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.util.Time;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import javax.ws.rs.core.HttpHeaders;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;
import static java.net.HttpURLConnection.HTTP_CREATED;
import static java.net.HttpURLConnection.HTTP_OK;
import static org.junit.Assert.assertEquals;
/**
* Helper functions to test Ozone.
*/
public class TestOzoneHelper {
public CloseableHttpClient createHttpClient() {
return HttpClients.createDefault();
}
/**
* Creates Volumes on Ozone Store.
*
* @throws IOException
*/
public void testCreateVolumes(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
CloseableHttpClient client = createHttpClient();
String volumeName = OzoneUtils.getRequestID().toLowerCase();
try {
HttpPost httppost = new HttpPost(
String.format("http://localhost:%d/%s", port, volumeName));
httppost.addHeader(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
httppost.addHeader(HttpHeaders.DATE,
format.format(new Date(Time.monotonicNow())));
httppost.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
OzoneConsts.OZONE_SIMPLE_HDFS_USER);
httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
HttpResponse response = client.execute(httppost);
assertEquals(response.toString(), HTTP_CREATED,
response.getStatusLine().getStatusCode());
} finally {
client.close();
}
}
/**
* Create Volumes with Quota.
*
* @throws IOException
*/
public void testCreateVolumesWithQuota(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
CloseableHttpClient client = createHttpClient();
String volumeName = OzoneUtils.getRequestID().toLowerCase();
try {
HttpPost httppost = new HttpPost(
String.format("http://localhost:%d/%s?quota=10TB", port, volumeName));
httppost.addHeader(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
httppost.addHeader(HttpHeaders.DATE,
format.format(new Date(Time.monotonicNow())));
httppost.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
OzoneConsts.OZONE_SIMPLE_HDFS_USER);
httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
HttpResponse response = client.execute(httppost);
assertEquals(response.toString(), HTTP_CREATED,
response.getStatusLine().getStatusCode());
} finally {
client.close();
}
}
/**
* Create Volumes with Invalid Quota.
*
* @throws IOException
*/
public void testCreateVolumesWithInvalidQuota(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
CloseableHttpClient client = createHttpClient();
String volumeName = OzoneUtils.getRequestID().toLowerCase();
try {
HttpPost httppost = new HttpPost(
String.format("http://localhost:%d/%s?quota=NaN", port, volumeName));
httppost.addHeader(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
httppost.addHeader(HttpHeaders.DATE,
format.format(new Date(Time.monotonicNow())));
httppost.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
OzoneConsts.OZONE_SIMPLE_HDFS_USER);
httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
HttpResponse response = client.execute(httppost);
assertEquals(response.toString(), ErrorTable.MALFORMED_QUOTA
.getHttpCode(),
response.getStatusLine().getStatusCode());
} finally {
client.close();
}
}
/**
* To create a volume, a user name must be specified using the OZONE_USER
* header. This test verifies that we get an error when the call is made
* without an OZONE user name.
*
* @throws IOException
*/
public void testCreateVolumesWithInvalidUser(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
CloseableHttpClient client = createHttpClient();
String volumeName = OzoneUtils.getRequestID().toLowerCase();
try {
HttpPost httppost = new HttpPost(
String.format("http://localhost:%d/%s?quota=1TB", port, volumeName));
httppost.addHeader(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
httppost.addHeader(HttpHeaders.DATE,
format.format(new Date(Time.monotonicNow())));
httppost.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
OzoneConsts.OZONE_SIMPLE_HDFS_USER);
HttpResponse response = client.execute(httppost);
assertEquals(response.toString(), ErrorTable.USER_NOT_FOUND.getHttpCode(),
response.getStatusLine().getStatusCode());
} finally {
client.close();
}
}
/**
* Only admins can create volumes in Ozone. This test uses the simple userauth
* backend, in which hdfs and root are the admin users.
* <p>
* This test tries to create a volume as user bilbo.
*
* @throws IOException
*/
public void testCreateVolumesWithOutAdminRights(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
CloseableHttpClient client = createHttpClient();
String volumeName = OzoneUtils.getRequestID().toLowerCase();
try {
HttpPost httppost = new HttpPost(
String.format("http://localhost:%d/%s?quota=NaN", port, volumeName));
httppost.addHeader(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
httppost.addHeader(HttpHeaders.DATE,
format.format(new Date(Time.monotonicNow())));
httppost.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
"bilbo"); // This is not a root user in Simple Auth
httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
HttpResponse response = client.execute(httppost);
assertEquals(response.toString(), ErrorTable.ACCESS_DENIED.getHttpCode(),
response.getStatusLine().getStatusCode());
} finally {
client.close();
}
}
/**
* Create a bunch of volumes in a loop.
*
* @throws IOException
*/
public void testCreateVolumesInLoop(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
for (int x = 0; x < 1000; x++) {
CloseableHttpClient client = createHttpClient();
String volumeName = OzoneUtils.getRequestID().toLowerCase();
String userName = OzoneUtils.getRequestID().toLowerCase();
HttpPost httppost = new HttpPost(
String.format("http://localhost:%d/%s?quota=10TB", port, volumeName));
httppost.addHeader(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
httppost.addHeader(HttpHeaders.DATE,
format.format(new Date(Time.monotonicNow())));
httppost.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
OzoneConsts.OZONE_SIMPLE_HDFS_USER);
httppost.addHeader(Header.OZONE_USER, userName);
HttpResponse response = client.execute(httppost);
assertEquals(response.toString(), HTTP_CREATED,
response.getStatusLine().getStatusCode());
client.close();
}
}
/**
* Get volumes owned by the user.
*
* @throws IOException
*/
public void testGetVolumesByUser(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
// We need to create a volume for this test to succeed.
testCreateVolumes(port);
CloseableHttpClient client = createHttpClient();
try {
HttpGet httpget =
new HttpGet(String.format("http://localhost:%d/", port));
httpget.addHeader(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
httpget.addHeader(HttpHeaders.DATE,
format.format(new Date(Time.monotonicNow())));
httpget.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
OzoneConsts.OZONE_SIMPLE_HDFS_USER);
httpget.addHeader(Header.OZONE_USER,
OzoneConsts.OZONE_SIMPLE_HDFS_USER);
HttpResponse response = client.execute(httpget);
assertEquals(response.toString(), HTTP_OK,
response.getStatusLine().getStatusCode());
} finally {
client.close();
}
}
/**
* Admins can read volumes belonging to other users.
*
* @throws IOException
*/
public void testGetVolumesOfAnotherUser(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
CloseableHttpClient client = createHttpClient();
try {
HttpGet httpget =
new HttpGet(String.format("http://localhost:%d/", port));
httpget.addHeader(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
httpget.addHeader(HttpHeaders.DATE,
format.format(new Date(Time.monotonicNow())));
httpget.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
OzoneConsts.OZONE_SIMPLE_ROOT_USER);
// User Root is getting volumes belonging to user HDFS
httpget.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
HttpResponse response = client.execute(httpget);
assertEquals(response.toString(), HTTP_OK,
response.getStatusLine().getStatusCode());
} finally {
client.close();
}
}
/**
* If a non-admin user tries to read volumes belonging to another user,
* the server rejects the request.
*
* @throws IOException
*/
public void testGetVolumesOfAnotherUserShouldFail(int port)
throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
CloseableHttpClient client = createHttpClient();
String userName = OzoneUtils.getRequestID().toLowerCase();
try {
HttpGet httpget =
new HttpGet(String.format("http://localhost:%d/", port));
httpget.addHeader(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
httpget.addHeader(HttpHeaders.DATE,
format.format(new Date(Time.monotonicNow())));
httpget.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
userName);
// userName is NOT a root user, hence he should NOT be able to read the
// volumes of user HDFS
httpget.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
HttpResponse response = client.execute(httpget);
// We will get an Error called userNotFound when using Simple Auth Scheme
assertEquals(response.toString(), ErrorTable.USER_NOT_FOUND.getHttpCode(),
response.getStatusLine().getStatusCode());
} finally {
client.close();
}
}
public void testListKeyOnEmptyBucket(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
CloseableHttpClient client = createHttpClient();
String volumeName = OzoneUtils.getRequestID().toLowerCase();
String bucketName = OzoneUtils.getRequestID().toLowerCase() + "bucket";
try {
HttpPost httppost = new HttpPost(
String.format("http://localhost:%d/%s", port, volumeName));
httppost.addHeader(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
httppost.addHeader(HttpHeaders.DATE,
format.format(new Date(Time.monotonicNow())));
httppost.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " "
+ OzoneConsts.OZONE_SIMPLE_HDFS_USER);
httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
HttpResponse response = client.execute(httppost);
assertEquals(response.toString(), HTTP_CREATED,
response.getStatusLine().getStatusCode());
client.close();
client = createHttpClient();
httppost = new HttpPost(String
.format("http://localhost:%d/%s/%s", port, volumeName, bucketName));
httppost.addHeader(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
httppost.addHeader(HttpHeaders.DATE,
format.format(new Date(Time.monotonicNow())));
httppost.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " "
+ OzoneConsts.OZONE_SIMPLE_HDFS_USER);
httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
response = client.execute(httppost);
assertEquals(response.toString(), HTTP_CREATED,
response.getStatusLine().getStatusCode());
client.close();
client = createHttpClient();
HttpGet httpget = new HttpGet(String
.format("http://localhost:%d/%s/%s", port, volumeName, bucketName));
httpget.addHeader(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
httpget.addHeader(HttpHeaders.DATE,
format.format(new Date(Time.monotonicNow())));
httpget.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " "
+ OzoneConsts.OZONE_SIMPLE_HDFS_USER);
httpget.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
response = client.execute(httpget);
assertEquals(response.toString() + " " + response.getStatusLine()
.getReasonPhrase(), HTTP_OK,
response.getStatusLine().getStatusCode());
} finally {
client.close();
}
}
}

View File

@ -16,37 +16,30 @@
*/
package org.apache.hadoop.ozone;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.utils.MetadataKeyFilters;
import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.primitives.Longs;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.KeyArgs;
import org.apache.hadoop.ozone.web.handlers.UserArgs;
import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.utils.MetadataKeyFilters;
import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
import java.io.IOException;
import java.io.OutputStream;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* A helper class used by {@link TestStorageContainerManager} to generate
@ -56,53 +49,32 @@ public class TestStorageContainerManagerHelper {
private final MiniOzoneCluster cluster;
private final Configuration conf;
private final StorageHandler storageHandler;
public TestStorageContainerManagerHelper(MiniOzoneCluster cluster,
Configuration conf) throws IOException {
this.cluster = cluster;
this.conf = conf;
storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
}
public Map<String, OmKeyInfo> createKeys(int numOfKeys, int keySize)
throws Exception {
Map<String, OmKeyInfo> keyLocationMap = Maps.newHashMap();
String volume = "volume" + RandomStringUtils.randomNumeric(5);
String bucket = "bucket" + RandomStringUtils.randomNumeric(5);
String userName = "user" + RandomStringUtils.randomNumeric(5);
String adminName = "admin" + RandomStringUtils.randomNumeric(5);
UserArgs userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
null, null, null, null);
VolumeArgs createVolumeArgs = new VolumeArgs(volume, userArgs);
createVolumeArgs.setUserName(userName);
createVolumeArgs.setAdminName(adminName);
storageHandler.createVolume(createVolumeArgs);
BucketArgs bucketArgs = new BucketArgs(bucket, createVolumeArgs);
bucketArgs.setStorageType(StorageType.DISK);
storageHandler.createBucket(bucketArgs);
// Write 20 keys in bucket.
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
// Write numOfKeys keys into the bucket.
Set<String> keyNames = Sets.newHashSet();
KeyArgs keyArgs;
for (int i = 0; i < numOfKeys; i++) {
String keyName = RandomStringUtils.randomAlphabetic(5) + i;
keyNames.add(keyName);
keyArgs = new KeyArgs(keyName, bucketArgs);
keyArgs.setSize(keySize);
// Just for testing list keys call, so no need to write real data.
OutputStream stream = storageHandler.newKeyWriter(keyArgs);
stream.write(DFSUtil.string2Bytes(
RandomStringUtils.randomAlphabetic(5)));
stream.close();
TestDataUtil
.createKey(bucket, keyName, RandomStringUtils.randomAlphabetic(5));
}
for (String key : keyNames) {
OmKeyArgs arg = new OmKeyArgs.Builder()
.setVolumeName(volume)
.setBucketName(bucket)
.setVolumeName(bucket.getVolumeName())
.setBucketName(bucket.getName())
.setKeyName(key)
.setRefreshPipeline(true)
.build();

View File

@ -58,6 +58,7 @@ import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientException;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
@ -67,7 +68,6 @@ import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.VolumeArgs;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.common.OzoneChecksumException;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
@ -240,7 +240,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testSetVolumeQuota()
throws IOException, OzoneException {
throws IOException {
String volumeName = UUID.randomUUID().toString();
store.createVolume(volumeName);
store.getVolume(volumeName).setQuota(
@ -264,7 +264,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testCreateVolumeWithMetadata()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
String volumeName = UUID.randomUUID().toString();
VolumeArgs volumeArgs = VolumeArgs.newBuilder()
.addMetadata("key1", "val1")
@ -278,7 +278,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testCreateBucketWithMetadata()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
long currentTime = Time.now();
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
@ -297,7 +297,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testCreateBucket()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
long currentTime = Time.now();
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
@ -312,7 +312,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testCreateS3Bucket()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
long currentTime = Time.now();
String userName = UserGroupInformation.getCurrentUser().getUserName();
String bucketName = UUID.randomUUID().toString();
@ -345,7 +345,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testListS3Buckets()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
String userName = "ozone100";
String bucketName1 = UUID.randomUUID().toString();
String bucketName2 = UUID.randomUUID().toString();
@ -363,7 +363,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testListS3BucketsFail()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
String userName = "randomUser";
Iterator<? extends OzoneBucket> iterator = store.listS3Buckets(userName,
null);
@ -402,7 +402,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testCreateS3BucketMapping()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
long currentTime = Time.now();
String userName = "ozone";
String bucketName = UUID.randomUUID().toString();
@ -421,7 +421,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testCreateBucketWithVersioning()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
@ -436,7 +436,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testCreateBucketWithStorageType()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
@ -451,7 +451,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testCreateBucketWithAcls()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
OzoneAcl userAcl = new OzoneAcl(USER, "test",
@ -470,7 +470,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testCreateBucketWithAllArgument()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
OzoneAcl userAcl = new OzoneAcl(USER, "test",
@ -507,7 +507,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testAddBucketAcl()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
@ -526,7 +526,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testRemoveBucketAcl()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
OzoneAcl userAcl = new OzoneAcl(USER, "test",
@ -581,7 +581,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testSetBucketVersioning()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
@ -618,7 +618,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testSetBucketStorageType()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
@ -678,7 +678,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testPutKey()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
long currentTime = Time.now();
@ -746,7 +746,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testPutKeyRatisOneNode()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
long currentTime = Time.now();
@ -781,7 +781,7 @@ public abstract class TestOzoneRpcClientAbstract {
@Test
public void testPutKeyRatisThreeNodes()
throws IOException, OzoneException {
throws IOException, OzoneClientException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
long currentTime = Time.now();
@ -976,7 +976,7 @@ public abstract class TestOzoneRpcClientAbstract {
}
@Test
public void testGetKeyDetails() throws IOException, OzoneException {
public void testGetKeyDetails() throws IOException, OzoneClientException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();

View File

@ -48,7 +48,6 @@ import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.ozShell.TestOzoneShell;
import org.apache.hadoop.ozone.protocol.commands.RetriableDatanodeEventWatcher;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
@ -97,7 +96,7 @@ public class TestBlockDeletion {
GenericTestUtils.setLogLevel(SCMBlockDeletingService.LOG, Level.DEBUG);
String path =
GenericTestUtils.getTempPath(TestOzoneShell.class.getSimpleName());
GenericTestUtils.getTempPath(TestBlockDeletion.class.getSimpleName());
File baseDir = new File(path);
baseDir.mkdirs();

View File

@ -17,8 +17,15 @@
*/
package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
import java.io.IOException;
import java.util.HashMap;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@ -26,32 +33,25 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
import org.apache.hadoop.test.GenericTestUtils;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
import java.util.HashMap;
import java.util.concurrent.TimeoutException;
/**
 * Test the behaviour of the datanode when it receives a close container
 * command.
*/
public class TestCloseContainerHandler {
@Test
public void test() throws IOException, TimeoutException, InterruptedException,
OzoneException {
public void test()
throws IOException, TimeoutException, InterruptedException {
//setup a cluster (1G free space is enough for a unit test)
OzoneConfiguration conf = new OzoneConfiguration();

View File

@ -1,208 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.ozone.om;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.KeyArgs;
import org.apache.hadoop.ozone.web.handlers.UserArgs;
import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
/**
* Test key write/read where a key can span multiple containers.
*/
public class TestMultipleContainerReadWrite {
private static MiniOzoneCluster cluster = null;
private static StorageHandler storageHandler;
private static UserArgs userArgs;
private static OzoneConfiguration conf;
@Rule
public ExpectedException exception = ExpectedException.none();
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@BeforeClass
public static void init() throws Exception {
conf = new OzoneConfiguration();
conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 1,
StorageUnit.MB);
conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 5);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
null, null, null, null);
}
/**
* Shutdown MiniDFSCluster.
*/
@AfterClass
public static void shutdown() {
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testWriteRead() throws Exception {
String userName = "user" + RandomStringUtils.randomNumeric(5);
String adminName = "admin" + RandomStringUtils.randomNumeric(5);
String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
String keyName = "key" + RandomStringUtils.randomNumeric(5);
VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
createVolumeArgs.setUserName(userName);
createVolumeArgs.setAdminName(adminName);
storageHandler.createVolume(createVolumeArgs);
BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
bucketArgs.setStorageType(StorageType.DISK);
storageHandler.createBucket(bucketArgs);
String dataString = RandomStringUtils.randomAscii(3 * (int)OzoneConsts.MB);
KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
keyArgs.setSize(3 * (int)OzoneConsts.MB);
keyArgs.setUserName(userName);
try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
outputStream.write(dataString.getBytes());
}
byte[] data = new byte[dataString.length()];
try (InputStream inputStream = storageHandler.newKeyReader(keyArgs)) {
inputStream.read(data, 0, data.length);
}
assertEquals(dataString, new String(data));
// check via the container metrics that the chunks were written and read.
MetricsRecordBuilder containerMetrics = getMetrics(
"StorageContainerMetrics");
assertCounter("numWriteChunk", 3L, containerMetrics);
assertCounter("numReadChunk", 3L, containerMetrics);
}
// Disabled because this test assumes writing beyond a specific size is not
// allowed, which is currently not the case. Keeping it in case we add this
// restriction in the future.
@Ignore
@Test
public void testErrorWrite() throws Exception {
String userName = "user" + RandomStringUtils.randomNumeric(5);
String adminName = "admin" + RandomStringUtils.randomNumeric(5);
String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
String keyName = "key" + RandomStringUtils.randomNumeric(5);
VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
createVolumeArgs.setUserName(userName);
createVolumeArgs.setAdminName(adminName);
storageHandler.createVolume(createVolumeArgs);
BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
bucketArgs.setStorageType(StorageType.DISK);
storageHandler.createBucket(bucketArgs);
String dataString1 = RandomStringUtils.randomAscii(100);
String dataString2 = RandomStringUtils.randomAscii(500);
KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
keyArgs.setSize(500);
try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
// the first write will succeed
outputStream.write(dataString1.getBytes());
// the second write should exceed the remaining space and fail
exception.expect(IOException.class);
exception.expectMessage(
"Can not write 500 bytes with only 400 byte space");
outputStream.write(dataString2.getBytes());
}
}
@Test
public void testPartialRead() throws Exception {
String userName = "user" + RandomStringUtils.randomNumeric(5);
String adminName = "admin" + RandomStringUtils.randomNumeric(5);
String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
String keyName = "key" + RandomStringUtils.randomNumeric(5);
VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
createVolumeArgs.setUserName(userName);
createVolumeArgs.setAdminName(adminName);
storageHandler.createVolume(createVolumeArgs);
BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
bucketArgs.setStorageType(StorageType.DISK);
storageHandler.createBucket(bucketArgs);
String dataString = RandomStringUtils.randomAscii(500);
KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
keyArgs.setSize(500);
keyArgs.setUserName(userName);
try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
outputStream.write(dataString.getBytes());
}
byte[] data = new byte[600];
try (InputStream inputStream = storageHandler.newKeyReader(keyArgs)) {
int readLen = inputStream.read(data, 0, 340);
assertEquals(340, readLen);
assertEquals(dataString.substring(0, 340),
new String(data).substring(0, 340));
readLen = inputStream.read(data, 340, 260);
assertEquals(160, readLen);
assertEquals(dataString, new String(data).substring(0, 500));
readLen = inputStream.read(data, 500, 1);
assertEquals(-1, readLen);
}
}
}
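This StorageHandler-based round trip goes away with the REST path. Elsewhere
in this commit the same write/read check is expressed through the RPC client
via TestDataUtil; a minimal sketch of that replacement pattern, assuming the
cluster/volume/bucket/key names used above:
    // Sketch only; mirrors the TestDataUtil usage introduced in this commit.
    OzoneBucket bucket =
        TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName);
    TestDataUtil.createKey(bucket, keyName, dataString);
    assertEquals(dataString, TestDataUtil.getKey(bucket, keyName));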

View File

@ -19,26 +19,23 @@ package org.apache.hadoop.ozone.om;
import java.util.UUID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneTestUtils;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.VolumeArgs;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.IOzoneObj;
import org.apache.hadoop.ozone.security.acl.RequestContext;
import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.KeyArgs;
import org.apache.hadoop.ozone.web.handlers.UserArgs;
import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.request.OzoneQuota;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.commons.lang3.RandomStringUtils;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
import org.junit.AfterClass;
import static org.junit.Assert.assertTrue;
@ -52,9 +49,8 @@ import org.junit.rules.ExpectedException;
*/
public class TestOmAcls {
private static boolean aclAllow = true;
private static MiniOzoneCluster cluster = null;
private static StorageHandler storageHandler;
private static UserArgs userArgs;
private static OMMetrics omMetrics;
private static OzoneConfiguration conf;
private static String clusterId;
@ -80,15 +76,13 @@ public class TestOmAcls {
conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
conf.setClass(OZONE_ACL_AUTHORIZER_CLASS, OzoneAccessAuthorizerTest.class,
IAccessAuthorizer.class);
conf.setStrings(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD);
cluster = MiniOzoneCluster.newBuilder(conf)
.setClusterId(clusterId)
.setScmId(scmId)
.setOmId(omId)
.build();
cluster.waitForClusterToBeReady();
storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
null, null, null, null);
omMetrics = cluster.getOzoneManager().getMetrics();
logCapturer =
GenericTestUtils.LogCapturer.captureLogs(OzoneManager.getLogger());
@ -104,65 +98,54 @@ public class TestOmAcls {
}
}
/**
* Tests the OM Initialization.
*/
@Test
public void testOMAclsPermissionDenied() throws Exception {
String user0 = "testListVolumes-user-0";
String adminUser = "testListVolumes-admin";
final VolumeArgs createVolumeArgs;
int i = 100;
String user0VolName = "Vol-" + user0 + "-" + i;
createVolumeArgs = new VolumeArgs(user0VolName, userArgs);
createVolumeArgs.setUserName(user0);
createVolumeArgs.setAdminName(adminUser);
createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB));
logCapturer.clearOutput();
OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED,
() -> storageHandler.createVolume(createVolumeArgs));
assertTrue(logCapturer.getOutput().contains("Only admin users are " +
"authorized to create Ozone"));
BucketArgs bucketArgs = new BucketArgs("bucket1", createVolumeArgs);
bucketArgs.setStorageType(StorageType.DISK);
@Test
public void testBucketCreationPermissionDenied() throws Exception {
TestOmAcls.aclAllow = true;
String volumeName = RandomStringUtils.randomAlphabetic(5).toLowerCase();
String bucketName = RandomStringUtils.randomAlphabetic(5).toLowerCase();
cluster.getClient().getObjectStore().createVolume(volumeName);
OzoneVolume volume =
cluster.getClient().getObjectStore().getVolume(volumeName);
TestOmAcls.aclAllow = false;
OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED,
() -> storageHandler.createBucket(bucketArgs));
assertTrue(logCapturer.getOutput().contains("Only admin users are" +
" authorized to create Ozone"));
() -> volume.createBucket(bucketName));
assertTrue(logCapturer.getOutput()
.contains("doesn't have CREATE permission to access volume"));
}
@Test
public void testFailureInKeyOp() throws Exception {
final VolumeArgs createVolumeArgs;
String userName = "user" + RandomStringUtils.randomNumeric(5);
String adminName = "admin" + RandomStringUtils.randomNumeric(5);
createVolumeArgs = new VolumeArgs(userName, userArgs);
createVolumeArgs.setUserName(userName);
createVolumeArgs.setAdminName(adminName);
createVolumeArgs.setQuota(new OzoneQuota(100, OzoneQuota.Units.GB));
BucketArgs bucketArgs = new BucketArgs("bucket1", createVolumeArgs);
bucketArgs.setStorageType(StorageType.DISK);
TestOmAcls.aclAllow = true;
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
logCapturer.clearOutput();
// write a key without specifying size at all
String keyName = "testKey";
KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
TestOmAcls.aclAllow = false;
OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED,
() -> storageHandler.newKeyWriter(keyArgs));
() -> TestDataUtil.createKey(bucket, "testKey", "testcontent"));
assertTrue(logCapturer.getOutput().contains("doesn't have WRITE " +
"permission to access key"));
}
}
/**
/**
 * Test implementation for the negative case.
*/
class OzoneAccessAuthorizerTest implements IAccessAuthorizer {
static class OzoneAccessAuthorizerTest implements IAccessAuthorizer {
@Override
public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context) {
return false;
return TestOmAcls.aclAllow;
}
}
}
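The authorizer above is now driven by a static flag, so a single
MiniOzoneCluster can exercise both the allow and deny paths. A short sketch of
the toggle as the tests in this file use it (all names taken from this file):
    // Flip the flag so checkAccess() denies, then expect PERMISSION_DENIED.
    TestOmAcls.aclAllow = false;
    OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED,
        () -> volume.createBucket(bucketName));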

View File

@ -16,50 +16,39 @@
*/
package org.apache.hadoop.ozone.om;
import org.apache.commons.lang3.RandomStringUtils;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.KeyArgs;
import org.apache.hadoop.ozone.web.handlers.UserArgs;
import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.commons.lang3.RandomStringUtils;
import org.junit.AfterClass;
import org.junit.Assert;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.Assert;
import org.junit.rules.ExpectedException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
* This class tests the versioning of blocks from OM side.
*/
public class TestOmBlockVersioning {
private static MiniOzoneCluster cluster = null;
private static UserArgs userArgs;
private static OzoneConfiguration conf;
private static OzoneManager ozoneManager;
private static StorageHandler storageHandler;
@Rule
public ExpectedException exception = ExpectedException.none();
@ -76,9 +65,6 @@ public class TestOmBlockVersioning {
conf = new OzoneConfiguration();
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
null, null, null, null);
ozoneManager = cluster.getOzoneManager();
}
@ -101,14 +87,7 @@ public class TestOmBlockVersioning {
String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
String keyName = "key" + RandomStringUtils.randomNumeric(5);
VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
createVolumeArgs.setUserName(userName);
createVolumeArgs.setAdminName(adminName);
storageHandler.createVolume(createVolumeArgs);
BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
bucketArgs.setStorageType(StorageType.DISK);
storageHandler.createBucket(bucketArgs);
TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName);
OmKeyArgs keyArgs = new OmKeyArgs.Builder()
.setVolumeName(volumeName)
@ -200,14 +179,8 @@ public class TestOmBlockVersioning {
String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
String keyName = "key" + RandomStringUtils.randomNumeric(5);
VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
createVolumeArgs.setUserName(userName);
createVolumeArgs.setAdminName(adminName);
storageHandler.createVolume(createVolumeArgs);
BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
bucketArgs.setStorageType(StorageType.DISK);
storageHandler.createBucket(bucketArgs);
OzoneBucket bucket =
TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName);
OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
.setVolumeName(volumeName)
@ -218,48 +191,30 @@ public class TestOmBlockVersioning {
.build();
String dataString = RandomStringUtils.randomAlphabetic(100);
KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
keyArgs.setUserName(userName);
// this write will create 1st version with one block
try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
stream.write(dataString.getBytes());
}
byte[] data = new byte[dataString.length()];
try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
in.read(data);
}
TestDataUtil.createKey(bucket, keyName, dataString);
assertEquals(dataString, TestDataUtil.getKey(bucket, keyName));
OmKeyInfo keyInfo = ozoneManager.lookupKey(omKeyArgs);
assertEquals(dataString, DFSUtil.bytes2String(data));
assertEquals(0, keyInfo.getLatestVersionLocations().getVersion());
assertEquals(1,
keyInfo.getLatestVersionLocations().getLocationList().size());
// this write will create 2nd version, 2nd version will contain block from
// version 1, and add a new block
dataString = RandomStringUtils.randomAlphabetic(10);
data = new byte[dataString.length()];
try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
stream.write(dataString.getBytes());
}
try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
in.read(data);
}
TestDataUtil.createKey(bucket, keyName, dataString);
keyInfo = ozoneManager.lookupKey(omKeyArgs);
assertEquals(dataString, DFSUtil.bytes2String(data));
assertEquals(dataString, TestDataUtil.getKey(bucket, keyName));
assertEquals(1, keyInfo.getLatestVersionLocations().getVersion());
assertEquals(2,
keyInfo.getLatestVersionLocations().getLocationList().size());
dataString = RandomStringUtils.randomAlphabetic(200);
data = new byte[dataString.length()];
try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
stream.write(dataString.getBytes());
}
try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
in.read(data);
}
TestDataUtil.createKey(bucket, keyName, dataString);
keyInfo = ozoneManager.lookupKey(omKeyArgs);
assertEquals(dataString, DFSUtil.bytes2String(data));
assertEquals(dataString, TestDataUtil.getKey(bucket, keyName));
assertEquals(2, keyInfo.getLatestVersionLocations().getVersion());
assertEquals(3,
keyInfo.getLatestVersionLocations().getLocationList().size());
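Versioning is now asserted through the OM lookup API instead of StorageHandler
streams. A minimal sketch of the lookup pattern used above, assuming the same
ozoneManager and omKeyArgs fixtures:
    // Each rewrite of the key bumps the version and appends a block location.
    OmKeyInfo keyInfo = ozoneManager.lookupKey(omKeyArgs);
    long version = keyInfo.getLatestVersionLocations().getVersion();
    int blockCount =
        keyInfo.getLatestVersionLocations().getLocationList().size();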

View File

@ -16,14 +16,14 @@
*/
package org.apache.hadoop.ozone.om;
import java.io.IOException;
import java.util.UUID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.web.handlers.UserArgs;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
@ -31,18 +31,11 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import java.io.IOException;
import java.util.UUID;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
/**
* Test Ozone Manager Init.
*/
public class TestOmInit {
private static MiniOzoneCluster cluster = null;
private static StorageHandler storageHandler;
private static UserArgs userArgs;
private static OMMetrics omMetrics;
private static OzoneConfiguration conf;
private static String clusterId;
@ -72,9 +65,6 @@ public class TestOmInit {
.setOmId(omId)
.build();
cluster.waitForClusterToBeReady();
storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
null, null, null, null);
omMetrics = cluster.getOzoneManager().getMetrics();
}

View File

@ -18,7 +18,10 @@
package org.apache.hadoop.ozone.om;
import org.apache.commons.lang3.RandomStringUtils;
import java.io.IOException;
import java.util.HashMap;
import java.util.UUID;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@ -29,25 +32,20 @@ import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.web.handlers.UserArgs;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import java.io.IOException;
import java.util.HashMap;
import java.util.UUID;
import org.apache.commons.lang3.RandomStringUtils;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
import org.junit.After;
import org.junit.Assert;
import static org.junit.Assert.fail;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
/**
* Test some client operations after cluster starts. And perform restart and
@ -55,7 +53,6 @@ import static org.junit.Assert.fail;
*/
public class TestOzoneManagerRestart {
private MiniOzoneCluster cluster = null;
private UserArgs userArgs;
private OzoneConfiguration conf;
private String clusterId;
private String scmId;
@ -86,8 +83,7 @@ public class TestOzoneManagerRestart {
.setOmId(omId)
.build();
cluster.waitForClusterToBeReady();
userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
null, null, null, null);
}
/**

View File

@ -17,13 +17,10 @@
*/
package org.apache.hadoop.ozone.ozShell;
import com.google.common.base.Strings;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.fs.FileUtil;
@ -31,19 +28,19 @@ import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.client.rest.RestClient;
import org.apache.hadoop.ozone.client.rpc.RpcClient;
import org.apache.hadoop.test.GenericTestUtils;
import com.google.common.base.Strings;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import static org.junit.Assert.fail;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import picocli.CommandLine;
@ -53,13 +50,9 @@ import picocli.CommandLine.ParameterException;
import picocli.CommandLine.ParseResult;
import picocli.CommandLine.RunLast;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
import static org.junit.Assert.fail;
/**
 * Tests for the Ozone datanode shell command.
*/
@RunWith(value = Parameterized.class)
public class TestOzoneDatanodeShell {
private static final Logger LOG =
@ -81,17 +74,6 @@ public class TestOzoneDatanodeShell {
private static final PrintStream OLD_OUT = System.out;
private static final PrintStream OLD_ERR = System.err;
@Parameterized.Parameters
public static Collection<Object[]> clientProtocol() {
Object[][] params = new Object[][]{
{RpcClient.class},
{RestClient.class}};
return Arrays.asList(params);
}
@Parameterized.Parameter
@SuppressWarnings("visibilitymodifier")
public Class clientProtocol;
/**
 * Create a MiniDFSCluster for testing, using the distributed Ozone
 * handler type.
@ -103,7 +85,7 @@ public class TestOzoneDatanodeShell {
conf = new OzoneConfiguration();
String path = GenericTestUtils.getTempPath(
TestOzoneShell.class.getSimpleName());
TestOzoneDatanodeShell.class.getSimpleName());
baseDir = new File(path);
baseDir.mkdirs();

View File

@ -1,221 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web;
import static com.google.common.base.Charsets.UTF_8;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.ozone.OzoneConsts.CHUNK_SIZE;
import static org.junit.Assert.*;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.client.OzoneQuota;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.client.VolumeArgs;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.rpc.RpcClient;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.junit.rules.Timeout;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
/**
* End-to-end testing of Ozone REST operations.
*/
public class TestOzoneRestWithMiniCluster {
/**
* Set the timeout for every test.
*/
@Rule
public Timeout testTimeout = new Timeout(300000);
private static MiniOzoneCluster cluster;
private static OzoneConfiguration conf;
private static ClientProtocol client;
private static ReplicationFactor replicationFactor = ReplicationFactor.ONE;
private static ReplicationType replicationType = ReplicationType.RATIS;
@Rule
public ExpectedException exception = ExpectedException.none();
@BeforeClass
public static void init() throws Exception {
conf = new OzoneConfiguration();
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
client = new RpcClient(conf);
}
@AfterClass
public static void shutdown() throws InterruptedException, IOException {
if (cluster != null) {
cluster.shutdown();
}
client.close();
}
@Test
public void testCreateAndGetVolume() throws Exception {
createAndGetVolume();
}
@Test
public void testCreateAndGetBucket() throws Exception {
OzoneVolume volume = createAndGetVolume();
createAndGetBucket(volume);
}
@Test
public void testPutAndGetKey() throws Exception {
String keyName = nextId("key");
String keyData = nextId("data");
OzoneVolume volume = createAndGetVolume();
OzoneBucket bucket = createAndGetBucket(volume);
putKey(bucket, keyName, keyData);
}
private void putKey(OzoneBucket bucket, String keyName, String keyData)
throws IOException {
try (
OzoneOutputStream ozoneOutputStream = bucket
.createKey(keyName, 0, replicationType, replicationFactor,
new HashMap<>());
InputStream inputStream = IOUtils.toInputStream(keyData, UTF_8)) {
IOUtils.copy(inputStream, ozoneOutputStream);
}
try (
InputStream inputStream = IOUtils.toInputStream(keyData, UTF_8);
OzoneInputStream ozoneInputStream = bucket.readKey(keyName)) {
// assert, rather than silently compute, the content equality
assertTrue(IOUtils.contentEquals(ozoneInputStream, inputStream));
}
}
@Test
public void testPutAndGetEmptyKey() throws Exception {
String keyName = nextId("key");
String keyData = "";
OzoneVolume volume = createAndGetVolume();
OzoneBucket bucket = createAndGetBucket(volume);
putKey(bucket, keyName, keyData);
}
@Test
public void testPutAndGetMultiChunkKey() throws Exception {
String keyName = nextId("key");
int keyDataLen = 3 * CHUNK_SIZE;
String keyData = buildKeyData(keyDataLen);
OzoneVolume volume = createAndGetVolume();
OzoneBucket bucket = createAndGetBucket(volume);
putKey(bucket, keyName, keyData);
}
@Test
public void testPutAndGetMultiChunkKeyLastChunkPartial() throws Exception {
String keyName = nextId("key");
int keyDataLen = (int)(2.5 * CHUNK_SIZE);
String keyData = buildKeyData(keyDataLen);
OzoneVolume volume = createAndGetVolume();
OzoneBucket bucket = createAndGetBucket(volume);
putKey(bucket, keyName, keyData);
}
@Test
public void testReplaceKey() throws Exception {
String keyName = nextId("key");
int keyDataLen = (int)(2.5 * CHUNK_SIZE);
String keyData = buildKeyData(keyDataLen);
OzoneVolume volume = createAndGetVolume();
OzoneBucket bucket = createAndGetBucket(volume);
putKey(bucket, keyName, keyData);
// Replace key with data consisting of fewer chunks.
keyDataLen = (int)(1.5 * CHUNK_SIZE);
keyData = buildKeyData(keyDataLen);
putKey(bucket, keyName, keyData);
// Replace key with data consisting of more chunks.
keyDataLen = (int)(3.5 * CHUNK_SIZE);
keyData = buildKeyData(keyDataLen);
putKey(bucket, keyName, keyData);
}
private OzoneVolume createAndGetVolume() throws IOException {
String volumeName = nextId("volume");
VolumeArgs volumeArgs = VolumeArgs.newBuilder()
.setOwner("bilbo")
.setQuota("100TB")
.setAdmin("hdfs")
.build();
client.createVolume(volumeName, volumeArgs);
OzoneVolume volume = client.getVolumeDetails(volumeName);
assertNotNull(volume);
assertEquals(volumeName, volume.getName());
assertEquals("bilbo", volume.getOwner());
assertNotNull(volume.getQuota());
assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(),
volume.getQuota());
return volume;
}
private OzoneBucket createAndGetBucket(OzoneVolume vol) throws IOException {
String bucketName = nextId("bucket");
vol.createBucket(bucketName);
OzoneBucket bucket = vol.getBucket(bucketName);
assertNotNull(bucket);
assertEquals(bucketName, bucket.getName());
return bucket;
}
/**
* Creates sample key data of the specified length. The data is a string of
* printable ASCII characters. This makes it easy to debug through visual
* inspection of the chunk files if a test fails.
*
* @param keyDataLen desired length of key data
* @return string of printable ASCII characters of the specified length
*/
private static String buildKeyData(int keyDataLen) {
return new String(dataset(keyDataLen, 33, 93), UTF_8);
}
/**
* Generates identifiers unique enough for use in tests, so that individual
 * tests don't collide on each other's data in the shared mini-cluster.
*
* @param idPrefix prefix to put in front of ID
* @return unique ID generated by appending a suffix to the given prefix
*/
private static String nextId(String idPrefix) {
return (idPrefix + RandomStringUtils.random(5, true, true)).toLowerCase();
}
}
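Although this suite is named for REST, its client was already constructed as
an RpcClient in init(), so only the multi-chunk coverage is lost. A hedged
sketch of the same round trip against the bucket API, reusing the helpers and
imports of the deleted class (bucket, keyName, CHUNK_SIZE, buildKeyData):
    // Hypothetical: write a 3-chunk key, then read it back and compare.
    String keyData = buildKeyData(3 * CHUNK_SIZE);
    try (OzoneOutputStream out = bucket.createKey(keyName, 0,
        replicationType, replicationFactor, new HashMap<>())) {
      out.write(keyData.getBytes(UTF_8));
    }
    try (OzoneInputStream in = bucket.readKey(keyName)) {
      assertEquals(keyData, new String(IOUtils.toByteArray(in), UTF_8));
    }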

View File

@ -1,187 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.TestOzoneHelper;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.BeforeClass;
import org.junit.AfterClass;
import org.junit.Test;
import org.junit.Assert;
import org.junit.rules.Timeout;
import org.slf4j.LoggerFactory;
import java.io.IOException;
/**
* Test ozone volume in the distributed storage handler scenario.
*/
public class TestOzoneVolumes extends TestOzoneHelper {
private static final org.slf4j.Logger LOG =
LoggerFactory.getLogger(TestOzoneVolumes.class);
/**
* Set the timeout for every test.
*/
@Rule
public Timeout testTimeout = new Timeout(300000);
private static MiniOzoneCluster cluster = null;
private static int port = 0;
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@BeforeClass
public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
port = cluster.getHddsDatanodes().get(0)
.getDatanodeDetails()
.getPort(DatanodeDetails.Port.Name.REST).getValue();
}
/**
* Shutdown MiniDFSCluster.
*/
@AfterClass
public static void shutdown() {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Creates Volumes on Ozone Store.
*
* @throws IOException
*/
@Test
public void testCreateVolumes() throws IOException {
super.testCreateVolumes(port);
Assert.assertEquals(0, cluster.getOzoneManager()
.getMetrics().getNumVolumeCreateFails());
}
/**
* Create Volumes with Quota.
*
* @throws IOException
*/
@Test
public void testCreateVolumesWithQuota() throws IOException {
super.testCreateVolumesWithQuota(port);
Assert.assertEquals(0, cluster.getOzoneManager()
.getMetrics().getNumVolumeCreateFails());
}
/**
* Create Volumes with Invalid Quota.
*
* @throws IOException
*/
@Test
public void testCreateVolumesWithInvalidQuota() throws IOException {
super.testCreateVolumesWithInvalidQuota(port);
Assert.assertEquals(0, cluster.getOzoneManager()
.getMetrics().getNumVolumeCreateFails());
}
/**
 * To create a volume, a user name must be specified using the OZONE_USER
 * header. This test verifies that we get an error if we call without an
 * OZONE user name.
*
* @throws IOException
*/
@Test
public void testCreateVolumesWithInvalidUser() throws IOException {
super.testCreateVolumesWithInvalidUser(port);
Assert.assertEquals(0, cluster.getOzoneManager()
.getMetrics().getNumVolumeCreateFails());
}
/**
 * Only admins can create volumes in Ozone. This test uses the simple
 * userauth backend, in which hdfs and root are admin users.
* <p>
* This test tries to create a volume as user bilbo.
*
* @throws IOException
*/
@Test
public void testCreateVolumesWithOutAdminRights() throws IOException {
super.testCreateVolumesWithOutAdminRights(port);
Assert.assertEquals(0, cluster.getOzoneManager()
.getMetrics().getNumVolumeCreateFails());
}
/**
* Create a bunch of volumes in a loop.
*
* @throws IOException
*/
@Test
public void testCreateVolumesInLoop() throws IOException {
super.testCreateVolumesInLoop(port);
Assert.assertEquals(0, cluster.getOzoneManager()
.getMetrics().getNumVolumeCreateFails());
}
/**
* Get volumes owned by the user.
*
* @throws IOException
*/
@Ignore("Test is ignored for time being, to be enabled after security.")
public void testGetVolumesByUser() throws IOException {
testGetVolumesByUser(port);
}
/**
* Admins can read volumes belonging to other users.
*
* @throws IOException
*/
@Ignore("Test is ignored for time being, to be enabled after security.")
public void testGetVolumesOfAnotherUser() throws IOException {
super.testGetVolumesOfAnotherUser(port);
}
/**
 * If you try to read volumes belonging to another user,
 * the server always ignores the request.
*
* @throws IOException
*/
@Ignore("Test is ignored for time being, to be enabled after security.")
public void testGetVolumesOfAnotherUserShouldFail() throws IOException {
super.testGetVolumesOfAnotherUserShouldFail(port);
}
}
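These REST volume tests keyed their assertions off OM metrics rather than the
wire protocol, so the check itself is protocol-agnostic. The pattern, as used
above:
    // Assert that no volume-create operation failed on the OM side.
    Assert.assertEquals(0, cluster.getOzoneManager()
        .getMetrics().getNumVolumeCreateFails());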

View File

@ -1,118 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.util.Time;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import javax.ws.rs.core.HttpHeaders;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;
import static java.net.HttpURLConnection.HTTP_CREATED;
import static org.apache.hadoop.ozone.web.utils.OzoneUtils.getRequestID;
import static org.junit.Assert.assertEquals;
/**
* Test Ozone Access through REST protocol.
*/
public class TestOzoneWebAccess {
/**
* Set the timeout for every test.
*/
@Rule
public Timeout testTimeout = new Timeout(300000);
private static MiniOzoneCluster cluster;
private static int port;
/**
* Create a MiniDFSCluster for testing.
*
* Ozone is made active by setting OZONE_ENABLED = true
* @throws IOException
*/
@BeforeClass
public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
port = cluster.getHddsDatanodes().get(0)
.getDatanodeDetails().getPort(
DatanodeDetails.Port.Name.REST).getValue();
}
/**
* shutdown MiniOzoneCluster.
*/
@AfterClass
public static void shutdown() {
if (cluster != null) {
cluster.shutdown();
}
}
/**
 * Send a valid Ozone request.
*
* @throws IOException
*/
@Test
public void testOzoneRequest() throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
CloseableHttpClient client = HttpClients.createDefault();
String volumeName = getRequestID().toLowerCase(Locale.US);
try {
HttpPost httppost = new HttpPost(
String.format("http://localhost:%d/%s", port, volumeName));
httppost.addHeader(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
httppost.addHeader(HttpHeaders.DATE,
format.format(new Date(Time.now())));
httppost.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
OzoneConsts.OZONE_SIMPLE_HDFS_USER);
httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
HttpResponse response = client.execute(httppost);
assertEquals(response.toString(), HTTP_CREATED,
response.getStatusLine().getStatusCode());
} finally {
client.close();
}
}
}

View File

@ -1,349 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.client;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.VolumeArgs;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.client.rest.RestClient;
import org.apache.hadoop.ozone.client.rpc.RpcClient;
import org.apache.hadoop.ozone.web.request.OzoneQuota;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.util.Time;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.io.IOException;
import java.net.URISyntaxException;
import java.text.ParseException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeFalse;
/**
* Test Ozone Bucket Lifecycle.
*/
@RunWith(value = Parameterized.class)
public class TestBuckets {
/**
* Set the timeout for every test.
*/
@Rule
public Timeout testTimeout = new Timeout(300000);
private static MiniOzoneCluster cluster = null;
private static ClientProtocol client = null;
private static OzoneConfiguration conf;
@Parameterized.Parameters
public static Collection<Object[]> clientProtocol() {
Object[][] params = new Object[][] {
{RpcClient.class},
{RestClient.class}};
return Arrays.asList(params);
}
@SuppressWarnings("visibilitymodifier")
@Parameterized.Parameter
public static Class clientProtocol;
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@BeforeClass
public static void init()
throws IOException, URISyntaxException, OzoneException, TimeoutException,
InterruptedException {
conf = new OzoneConfiguration();
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3)
.build();
cluster.waitForClusterToBeReady();
}
@Before
public void setup() throws Exception {
if (clientProtocol.equals(RestClient.class)) {
client = new RestClient(conf);
} else {
client = new RpcClient(conf);
}
}
/**
* shutdown MiniDFSCluster.
*/
@AfterClass
public static void shutdown() {
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testCreateBucket() throws Exception {
runTestCreateBucket(client);
}
static void runTestCreateBucket(ClientProtocol protocol)
throws IOException {
String volumeName = OzoneUtils.getRequestID().toLowerCase();
VolumeArgs volumeArgs = VolumeArgs.newBuilder()
.setOwner("bilbo")
.setQuota("100TB")
.setAdmin("hdfs")
.build();
protocol.createVolume(volumeName, volumeArgs);
OzoneVolume vol = protocol.getVolumeDetails(volumeName);
String[] acls = {"user:frodo:rw", "user:samwise:rw"};
// create 10 buckets under same volume
for (int x = 0; x < 10; x++) {
long currentTime = Time.now();
String bucketName = OzoneUtils.getRequestID().toLowerCase();
List<OzoneAcl> aclList =
Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
.collect(Collectors.toList());
BucketArgs bucketArgs = BucketArgs.newBuilder()
.setAcls(aclList)
.build();
vol.createBucket(bucketName, bucketArgs);
OzoneBucket bucket = vol.getBucket(bucketName);
assertEquals(bucket.getName(), bucketName);
// verify the bucket creation time
assertTrue((bucket.getCreationTime() / 1000) >= (currentTime / 1000));
}
protocol.close();
assertEquals(vol.getName(), volumeName);
assertEquals(vol.getAdmin(), "hdfs");
assertEquals(vol.getOwner(), "bilbo");
assertEquals(vol.getQuota(), OzoneQuota.parseQuota("100TB").sizeInBytes());
// Test creating a bucket with an invalid bucket name. We don't use the
// ExpectedException Rule here because the test method is static.
try {
String invalidBucketName = "#" + OzoneUtils.getRequestID().toLowerCase();
vol.createBucket(invalidBucketName);
fail("Except the bucket creation to be failed because the"
+ " bucket name starts with an invalid char #");
} catch (Exception e) {
assertTrue(e.getMessage()
.contains("Bucket or Volume name has an unsupported character : #"));
}
}
@Test
public void testAddBucketAcls() throws Exception {
assumeFalse("Rest Client does not support ACL",
clientProtocol.equals(RestClient.class));
runTestAddBucketAcls(client);
}
static void runTestAddBucketAcls(ClientProtocol protocol)
throws OzoneException, IOException, ParseException {
String volumeName = OzoneUtils.getRequestID().toLowerCase();
VolumeArgs volumeArgs = VolumeArgs.newBuilder()
.setOwner("bilbo")
.setQuota("100TB")
.setAdmin("hdfs")
.build();
protocol.createVolume(volumeName, volumeArgs);
OzoneVolume vol = protocol.getVolumeDetails(volumeName);
String[] acls = {"user:frodo:rw", "user:samwise:rw"};
String bucketName = OzoneUtils.getRequestID().toLowerCase();
vol.createBucket(bucketName);
OzoneBucket bucket = vol.getBucket(bucketName);
List<OzoneAcl> aclList =
Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
.collect(Collectors.toList());
int numAcls = bucket.getAcls().size();
for (OzoneAcl ozoneAcl : aclList) {
Assert.assertTrue(bucket.addAcls(ozoneAcl));
}
OzoneBucket updatedBucket = vol.getBucket(bucketName);
assertEquals(updatedBucket.getAcls().size(), 2 + numAcls);
// verify that the creation time is not lost after the update operation
assertTrue(
(updatedBucket.getCreationTime()) / 1000 >= 0);
protocol.close();
}
@Test
public void testRemoveBucketAcls() throws Exception {
assumeFalse("Rest Client does not support ACL",
clientProtocol.equals(RestClient.class));
runTestRemoveBucketAcls(client);
}
static void runTestRemoveBucketAcls(ClientProtocol protocol)
throws OzoneException, IOException, ParseException {
String volumeName = OzoneUtils.getRequestID().toLowerCase();
VolumeArgs volumeArgs = VolumeArgs.newBuilder()
.setOwner("bilbo")
.setQuota("100TB")
.setAdmin("hdfs")
.build();
protocol.createVolume(volumeName, volumeArgs);
OzoneVolume vol = protocol.getVolumeDetails(volumeName);
String[] acls = {"user:frodo:rw", "user:samwise:rw"};
String bucketName = OzoneUtils.getRequestID().toLowerCase();
List<OzoneAcl> aclList =
Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
.collect(Collectors.toList());
vol.createBucket(bucketName);
OzoneBucket bucket = vol.getBucket(bucketName);
int numAcls = bucket.getAcls().size();
for (OzoneAcl ozoneAcl : aclList) {
Assert.assertTrue(bucket.addAcls(ozoneAcl));
}
assertEquals(bucket.getAcls().size(), 2 + numAcls);
for (OzoneAcl ozoneAcl : aclList) {
Assert.assertTrue(bucket.removeAcls(ozoneAcl));
}
OzoneBucket updatedBucket = vol.getBucket(bucketName);
// We removed all acls
assertEquals(updatedBucket.getAcls().size(), numAcls);
// verify that the creation time is not lost after the update operation
assertTrue(
(updatedBucket.getCreationTime() / 1000) >= 0);
protocol.close();
}
@Test
public void testDeleteBucket() throws OzoneException, IOException {
runTestDeleteBucket(client);
}
static void runTestDeleteBucket(ClientProtocol protocol)
throws OzoneException, IOException {
String volumeName = OzoneUtils.getRequestID().toLowerCase();
VolumeArgs volumeArgs = VolumeArgs.newBuilder()
.setOwner("bilbo")
.setQuota("100TB")
.setAdmin("hdfs")
.build();
protocol.createVolume(volumeName, volumeArgs);
OzoneVolume vol = protocol.getVolumeDetails(volumeName);
String[] acls = {"user:frodo:rw", "user:samwise:rw"};
String bucketName = OzoneUtils.getRequestID().toLowerCase();
List<OzoneAcl> aclList =
Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
.collect(Collectors.toList());
BucketArgs bucketArgs = BucketArgs.newBuilder()
.setAcls(aclList)
.build();
vol.createBucket(bucketName, bucketArgs);
vol.deleteBucket(bucketName);
try {
OzoneBucket updatedBucket = vol.getBucket(bucketName);
fail("Fetching deleted bucket, Should not reach here.");
} catch (Exception ex) {
// must throw
assertNotNull(ex);
}
protocol.close();
}
@Test
public void testListBucket() throws Exception {
runTestListBucket(client);
}
static void runTestListBucket(ClientProtocol protocol)
throws OzoneException, IOException, ParseException {
String volumeName = OzoneUtils.getRequestID().toLowerCase();
VolumeArgs volumeArgs = VolumeArgs.newBuilder()
.setOwner("bilbo")
.setQuota("100TB")
.setAdmin("hdfs")
.build();
protocol.createVolume(volumeName, volumeArgs);
OzoneVolume vol = protocol.getVolumeDetails(volumeName);
String[] acls = {"user:frodo:rw", "user:samwise:rw"};
List<OzoneAcl> aclList =
Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
.collect(Collectors.toList());
long currentTime = Time.now();
for (int x = 0; x < 10; x++) {
String bucketName = "listbucket-test-" + x;
BucketArgs bucketArgs = BucketArgs.newBuilder()
.setAcls(aclList)
.build();
vol.createBucket(bucketName, bucketArgs);
}
Iterator<? extends OzoneBucket> bucketIterator = vol.listBuckets(null);
int count = 0;
while (bucketIterator.hasNext()) {
assertTrue((bucketIterator.next().getCreationTime()
/ 1000) >= (currentTime / 1000));
count++;
}
assertEquals(count, 10);
bucketIterator = vol.listBuckets(null, "listbucket-test-4");
assertEquals(getSize(bucketIterator), 5);
bucketIterator = vol.listBuckets(null, "listbucket-test-3");
assertEquals(getSize(bucketIterator), 6);
protocol.close();
}
private static int getSize(Iterator<? extends OzoneBucket> bucketIterator) {
int count = 0;
while (bucketIterator.hasNext()) {
count++;
bucketIterator.next();
}
return count;
}
}
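The ACL add/remove flow above was already skipped for the RestClient via
assumeFalse, so only the RPC variant carried coverage. A condensed sketch of
that flow, borrowing the names from runTestAddBucketAcls and
runTestRemoveBucketAcls:
    // Grant, then revoke, each parsed ACL; verify via a fresh getBucket().
    OzoneBucket bucket = vol.getBucket(bucketName);
    for (OzoneAcl ozoneAcl : aclList) {
      Assert.assertTrue(bucket.addAcls(ozoneAcl));
    }
    for (OzoneAcl ozoneAcl : aclList) {
      Assert.assertTrue(bucket.removeAcls(ozoneAcl));
    }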

View File

@ -1,108 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.client;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.RatisTestHelper;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.client.rest.RestClient;
import org.apache.hadoop.ozone.client.rpc.RpcClient;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
/** The same as {@link TestBuckets} except that this test is Ratis enabled. */
@Ignore("Disabling Ratis tests for pipeline work.")
@RunWith(value = Parameterized.class)
public class TestBucketsRatis {
@Rule
public Timeout testTimeout = new Timeout(300000);
private static RatisTestHelper.RatisTestSuite suite;
private static ClientProtocol client;
private static OzoneConfiguration conf;
@Parameterized.Parameters
public static Collection<Object[]> clientProtocol() {
Object[][] params = new Object[][] {
{RpcClient.class},
{RestClient.class}};
return Arrays.asList(params);
}
@Parameterized.Parameter
@SuppressWarnings("visibilitymodifier")
public static Class clientProtocol;
@BeforeClass
public static void init() throws Exception {
suite = new RatisTestHelper.RatisTestSuite();
conf = suite.getConf();
}
@Before
public void setup() throws Exception {
if (clientProtocol.equals(RestClient.class)) {
client = new RestClient(conf);
} else {
client = new RpcClient(conf);
}
}
@AfterClass
public static void shutdown() {
if (suite != null) {
suite.close();
}
}
@Test
public void testCreateBucket() throws Exception {
TestBuckets.runTestCreateBucket(client);
}
@Test
public void testAddBucketAcls() throws Exception {
TestBuckets.runTestAddBucketAcls(client);
}
@Test
public void testRemoveBucketAcls() throws Exception {
TestBuckets.runTestRemoveBucketAcls(client);
}
@Test
public void testDeleteBucket() throws OzoneException, IOException {
TestBuckets.runTestDeleteBucket(client);
}
@Test
public void testListBucket() throws Exception {
TestBuckets.runTestListBucket(client);
}
}

View File

@ -1,734 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.client;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.collections.IteratorUtils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.OzoneTestUtils;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.VolumeArgs;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.client.rpc.RpcClient;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import static org.apache.hadoop.hdds
.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.OZONE_SCM_STALENODE_INTERVAL;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* Test Ozone Key Lifecycle.
*/
public class TestKeys {
/**
* Set the timeout for every test.
*/
@Rule
public Timeout testTimeout = new Timeout(300000);
private static OzoneConfiguration conf;
private static MiniOzoneCluster ozoneCluster = null;
private static String path;
private static ClientProtocol client = null;
private static long currentTime;
private static ReplicationFactor replicationFactor = ReplicationFactor.ONE;
private static ReplicationType replicationType = ReplicationType.STAND_ALONE;
/**
* Create a MiniOzoneCluster for testing.
*
* @throws IOException
*/
@Before
public void init() throws Exception {
conf = new OzoneConfiguration();
// Set short block deleting service interval to speed up deletions.
conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
1000, TimeUnit.MILLISECONDS);
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS);
path = GenericTestUtils.getTempPath(TestKeys.class.getSimpleName());
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
ozoneCluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(1)
.setHbInterval(1000)
.setHbProcessorInterval(1000)
.build();
ozoneCluster.waitForClusterToBeReady();
client = new RpcClient(conf);
currentTime = Time.now();
}
/**
* Shutdown the MiniOzoneCluster.
*/
@After
public void shutdown() {
if (ozoneCluster != null) {
ozoneCluster.shutdown();
}
}
/**
* Creates a file with Random Data.
*
* @return File.
*/
static File createRandomDataFile(String dir, String fileName, long size)
throws IOException {
File tmpDir = new File(dir);
FileUtils.forceMkdir(tmpDir);
File tmpFile = new File(tmpDir, fileName);
try (FileOutputStream randFile = new FileOutputStream(tmpFile)) {
Random r = new Random();
for (int x = 0; x < size; x++) {
char c = (char) (r.nextInt(26) + 'a');
randFile.write(c);
}
} catch (IOException e) {
fail(e.getMessage());
}
return tmpFile;
}
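// Note: each write emits one random lowercase letter as a single byte,
// so the requested size equals the resulting file length in bytes.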
/**
* Generates a multi-part key whose parts are separated by the given
* delimiter. Each part is a random alphanumeric string of length 0 - 4
* (or exactly 5 when there is only one part), and the number of parts is
* between 1 and 5.
*
* @param delimiter delimiter used to separate the parts of the key
* @return key composed of multiple parts separated by the delimiter
*/
static String getMultiPartKey(String delimiter) {
int numParts = RandomUtils.nextInt(0, 5) + 1;
String[] nameParts = new String[numParts];
for (int i = 0; i < numParts; i++) {
int stringLength = numParts == 1 ? 5 : RandomUtils.nextInt(0, 5);
nameParts[i] = RandomStringUtils.randomAlphanumeric(stringLength);
}
return StringUtils.join(delimiter, nameParts);
}
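// Illustrative example (hypothetical values): with delimiter "/" and the
// generated parts ["ab", "", "xyz"], the resulting key is "ab//xyz".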
static class PutHelper {
private final ClientProtocol client;
private final String dir;
private final String keyName;
private OzoneVolume vol;
private OzoneBucket bucket;
private File file;
PutHelper(ClientProtocol client, String dir) {
this(client, dir, OzoneUtils.getRequestID().toLowerCase());
}
PutHelper(ClientProtocol client, String dir, String key) {
this.client = client;
this.dir = dir;
this.keyName = key;
}
public OzoneVolume getVol() {
return vol;
}
public OzoneBucket getBucket() {
return bucket;
}
public File getFile() {
return file;
}
/**
* This function is reused in all other tests.
*
* @return the name of the new key that was created
* @throws Exception if volume, bucket, or key creation fails
*/
private String putKey() throws Exception {
String volumeName = OzoneUtils.getRequestID().toLowerCase();
VolumeArgs volumeArgs = VolumeArgs.newBuilder()
.setOwner("bilbo")
.setQuota("100TB")
.setAdmin("hdfs")
.build();
client.createVolume(volumeName, volumeArgs);
vol = client.getVolumeDetails(volumeName);
String[] acls = {"user:frodo:rw", "user:samwise:rw"};
String bucketName = OzoneUtils.getRequestID().toLowerCase();
List<OzoneAcl> aclList =
Arrays.stream(acls).map(OzoneAcl::parseAcl)
.collect(Collectors.toList());
BucketArgs bucketArgs = BucketArgs.newBuilder()
.setAcls(aclList)
.build();
vol.createBucket(bucketName, bucketArgs);
bucket = vol.getBucket(bucketName);
String fileName = OzoneUtils.getRequestID().toLowerCase();
file = createRandomDataFile(dir, fileName, 1024);
try (
OzoneOutputStream ozoneOutputStream = bucket
.createKey(keyName, 0, replicationType, replicationFactor,
new HashMap<>());
InputStream fileInputStream = new FileInputStream(file)) {
IOUtils.copy(fileInputStream, ozoneOutputStream);
}
return keyName;
}
}
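// Typical usage within this test class (sketch):
//   PutHelper helper = new PutHelper(client, path);
//   String keyName = helper.putKey();
//   OzoneBucket bucket = helper.getBucket();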
@Test
public void testPutKey() throws Exception {
// Test non-delimited keys
runTestPutKey(new PutHelper(client, path));
// Test key delimited by a random delimiter
String delimiter = RandomStringUtils.randomAscii(1);
runTestPutKey(new PutHelper(client, path,
getMultiPartKey(delimiter)));
}
@SuppressWarnings("emptyblock")
static void runTestPutKey(PutHelper helper) throws Exception {
final ClientProtocol helperClient = helper.client;
helper.putKey();
assertNotNull(helper.getBucket());
assertNotNull(helper.getFile());
List<OzoneKey> keyList = helperClient
.listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
null, 10);
Assert.assertEquals(1, keyList.size());
// test list key using a more efficient call
String newkeyName = OzoneUtils.getRequestID().toLowerCase();
OzoneOutputStream ozoneOutputStream = helperClient
.createKey(helper.getVol().getName(), helper.getBucket().getName(),
newkeyName, 0, replicationType, replicationFactor, new HashMap<>());
ozoneOutputStream.close();
keyList = helperClient
.listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
null, 10);
Assert.assertEquals(2, keyList.size());
// test new put key with invalid volume/bucket name
OzoneTestUtils.expectOmException(ResultCodes.VOLUME_NOT_FOUND, () -> {
try (OzoneOutputStream oos = helperClient
.createKey("invalid-volume", helper.getBucket().getName(), newkeyName,
0, replicationType, replicationFactor, new HashMap<>())) {
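// Intentionally empty: createKey is expected to fail with
// VOLUME_NOT_FOUND, so there is nothing to write.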
}
});
OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND, () -> {
try (OzoneOutputStream oos = helperClient
.createKey(helper.getVol().getName(), "invalid-bucket", newkeyName, 0,
replicationType, replicationFactor, new HashMap<>())) {
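// Intentionally empty: createKey is expected to fail with
// BUCKET_NOT_FOUND, so there is nothing to write.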
}
});
}
private static void restartDatanode(MiniOzoneCluster cluster, int datanodeIdx)
throws Exception {
cluster.restartHddsDatanode(datanodeIdx, true);
}
@Test
public void testPutAndGetKeyWithDnRestart() throws Exception {
runTestPutAndGetKeyWithDnRestart(
new PutHelper(client, path), ozoneCluster);
String delimiter = RandomStringUtils.randomAscii(1);
runTestPutAndGetKeyWithDnRestart(
new PutHelper(client, path,
getMultiPartKey(delimiter)), ozoneCluster);
}
static void runTestPutAndGetKeyWithDnRestart(
PutHelper helper, MiniOzoneCluster cluster) throws Exception {
String keyName = helper.putKey();
assertNotNull(helper.getBucket());
assertNotNull(helper.getFile());
// restart the datanode
restartDatanode(cluster, 0);
// verify getKey after the datanode restart
String newFileName = helper.dir + "/"
+ OzoneUtils.getRequestID().toLowerCase();
Path newPath = Paths.get(newFileName);
try (
FileOutputStream newOutputStream = new FileOutputStream(
newPath.toString());
OzoneInputStream ozoneInputStream = helper.client
.getKey(helper.getVol().getName(), helper.getBucket().getName(),
keyName)) {
IOUtils.copy(ozoneInputStream, newOutputStream);
}
try (
FileInputStream original = new FileInputStream(helper.getFile());
FileInputStream downloaded = new FileInputStream(newPath.toFile())) {
String originalHash = DigestUtils.sha256Hex(original);
String downloadedHash = DigestUtils.sha256Hex(downloaded);
assertEquals(
"Sha256 does not match between original file and downloaded file.",
originalHash, downloadedHash);
}
}
@Test
public void testPutAndGetKey() throws Exception {
runTestPutAndGetKey(new PutHelper(client, path));
String delimiter = RandomStringUtils.randomAscii(1);
runTestPutAndGetKey(new PutHelper(client, path,
getMultiPartKey(delimiter)));
}
static void runTestPutAndGetKey(PutHelper helper) throws Exception {
final ClientProtocol helperClient = helper.client;
String keyName = helper.putKey();
assertNotNull(helper.getBucket());
assertNotNull(helper.getFile());
final String newFileName1 = helper.dir + "/"
+ OzoneUtils.getRequestID().toLowerCase();
final String newFileName2 = helper.dir + "/"
+ OzoneUtils.getRequestID().toLowerCase();
Path newPath1 = Paths.get(newFileName1);
Path newPath2 = Paths.get(newFileName2);
try (
FileOutputStream newOutputStream = new FileOutputStream(
newPath1.toString());
OzoneInputStream ozoneInputStream = helper.getBucket()
.readKey(keyName)) {
IOUtils.copy(ozoneInputStream, newOutputStream);
}
// read the key a second time to verify repeated reads return the same content
try (
FileOutputStream newOutputStream = new FileOutputStream(
newPath2.toString());
OzoneInputStream ozoneInputStream = helper.getBucket()
.readKey(keyName)) {
IOUtils.copy(ozoneInputStream, newOutputStream);
}
try (FileInputStream original = new FileInputStream(helper.getFile());
FileInputStream downloaded1 = new FileInputStream(newPath1.toFile());
FileInputStream downloaded2 = new FileInputStream(newPath2.toFile())) {
String originalHash = DigestUtils.sha256Hex(original);
String downloadedHash1 = DigestUtils.sha256Hex(downloaded1);
String downloadedHash2 = DigestUtils.sha256Hex(downloaded2);
assertEquals(
"Sha256 does not match between original file and downloaded file.",
originalHash, downloadedHash1);
assertEquals(
"Sha256 does not match between original file and downloaded file.",
originalHash, downloadedHash2);
// test new get key with invalid volume/bucket name
OzoneTestUtils.expectOmException(ResultCodes.KEY_NOT_FOUND,
() -> helperClient.getKey(
"invalid-volume", helper.getBucket().getName(), keyName));
OzoneTestUtils.expectOmException(ResultCodes.KEY_NOT_FOUND,
() -> helperClient.getKey(
helper.getVol().getName(), "invalid-bucket", keyName));
}
}
@Test
public void testPutAndDeleteKey() throws Exception {
runTestPutAndDeleteKey(new PutHelper(client, path));
String delimiter = RandomStringUtils.randomAscii(1);
runTestPutAndDeleteKey(new PutHelper(client, path,
getMultiPartKey(delimiter)));
}
static void runTestPutAndDeleteKey(PutHelper helper) throws Exception {
String keyName = helper.putKey();
assertNotNull(helper.getBucket());
assertNotNull(helper.getFile());
helper.getBucket().deleteKey(keyName);
OzoneTestUtils.expectOmException(ResultCodes.KEY_NOT_FOUND, () -> {
helper.getBucket().getKey(keyName);
});
}
@Test
public void testPutAndListKey() throws Exception {
runTestPutAndListKey(new PutHelper(client, path));
String delimiter = RandomStringUtils.randomAscii(1);
runTestPutAndListKey(new PutHelper(client, path,
getMultiPartKey(delimiter)));
}
static void runTestPutAndListKey(PutHelper helper) throws Exception {
ClientProtocol helperClient = helper.client;
helper.putKey();
assertNotNull(helper.getBucket());
assertNotNull(helper.getFile());
// add keys [list-key0, list-key1, ..., list-key9]
for (int x = 0; x < 10; x++) {
String newkeyName = "list-key" + x;
try (
OzoneOutputStream ozoneOutputStream = helper.getBucket()
.createKey(newkeyName, 0, replicationType, replicationFactor,
new HashMap<>());
InputStream fileInputStream = new FileInputStream(helper.getFile())) {
IOUtils.copy(fileInputStream, ozoneOutputStream);
}
}
List<OzoneKey> keyList1 =
IteratorUtils.toList(helper.getBucket().listKeys(null, null));
// test list key using a more efficient call
List<OzoneKey> keyList2 = helperClient
.listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
null, 100);
Assert.assertEquals(11, keyList1.size());
Assert.assertEquals(11, keyList2.size());
// Verify the key creation/modification time. We compare at second
// granularity, since reparsing the date string to milliseconds loses
// precision.
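// For example, a timestamp of 1556700000123 ms compares as 1556700000 s,
// so sub-second differences introduced by reparsing are ignored.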
for (OzoneKey key : keyList1) {
assertTrue((key.getCreationTime() / 1000) >= (currentTime / 1000));
assertTrue((key.getModificationTime() / 1000) >= (currentTime / 1000));
}
for (OzoneKey key : keyList2) {
assertTrue((key.getCreationTime() / 1000) >= (currentTime / 1000));
assertTrue((key.getModificationTime() / 1000) >= (currentTime / 1000));
}
// test maxLength parameter of list keys
keyList2 = helperClient
.listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
null, 1);
Assert.assertEquals(1, keyList2.size());
// test startKey parameter of list keys
keyList1 = IteratorUtils
.toList(helper.getBucket().listKeys("list-key", "list-key4"));
keyList2 = helperClient
.listKeys(helper.getVol().getName(), helper.getBucket().getName(),
"list-key", "list-key4", 100);
Assert.assertEquals(5, keyList1.size());
Assert.assertEquals(5, keyList2.size());
// test prefix parameter of list keys
keyList1 =
IteratorUtils.toList(helper.getBucket().listKeys("list-key2", null));
keyList2 = helperClient
.listKeys(helper.getVol().getName(), helper.getBucket().getName(),
"list-key2", null, 100);
Assert.assertTrue(
keyList1.size() == 1 && keyList1.get(0).getName().equals("list-key2"));
Assert.assertTrue(
keyList2.size() == 1 && keyList2.get(0).getName().equals("list-key2"));
// test new list keys with invalid volume/bucket name
OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND, () -> {
helperClient.listKeys("invalid-volume", helper.getBucket().getName(),
null, null, 100);
});
OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND, () -> {
helperClient.listKeys(helper.getVol().getName(), "invalid-bucket", null,
null, 100);
});
}
@Test
public void testGetKeyInfo() throws Exception {
runTestGetKeyInfo(new PutHelper(client, path));
String delimiter = RandomStringUtils.randomAscii(1);
runTestGetKeyInfo(new PutHelper(client, path,
getMultiPartKey(delimiter)));
}
static void runTestGetKeyInfo(PutHelper helper) throws Exception {
String keyName = helper.putKey();
assertNotNull(helper.getBucket());
assertNotNull(helper.getFile());
OzoneKey keyInfo = helper.getBucket().getKey(keyName);
assertNotNull(keyInfo);
assertEquals(keyName, keyInfo.getName());
// Compare at second granularity, since reparsing the date string to
// milliseconds loses precision.
Assert
.assertTrue((keyInfo.getCreationTime() / 1000) >= (currentTime / 1000));
Assert.assertTrue(
(keyInfo.getModificationTime() / 1000) >= (currentTime / 1000));
}
// Volume, bucket, and key bookkeeping that supports the create/delete key tests.
private static class BucketKeys {
private Map<Pair<String, String>, List<String>> buckets;
BucketKeys() {
buckets = Maps.newHashMap();
}
void addKey(String volume, String bucket, String key) {
// check if this bucket exists
for (Map.Entry<Pair<String, String>, List<String>> entry :
buckets.entrySet()) {
if (entry.getKey().getValue().equals(bucket)) {
entry.getValue().add(key);
return;
}
}
// bucket does not exist yet, create a new entry
Pair<String, String> newBucket = new ImmutablePair<>(volume, bucket);
List<String> keyList = Lists.newArrayList();
keyList.add(key);
buckets.put(newBucket, keyList);
}
Set<Pair<String, String>> getAllBuckets() {
return buckets.keySet();
}
List<String> getBucketKeys(String bucketName) {
for (Map.Entry<Pair<String, String>, List<String>> entry : buckets
.entrySet()) {
if (entry.getKey().getValue().equals(bucketName)) {
return entry.getValue();
}
}
return Lists.newArrayList();
}
int totalNumOfKeys() {
int count = 0;
for (Map.Entry<Pair<String, String>, List<String>> entry : buckets
.entrySet()) {
count += entry.getValue().size();
}
return count;
}
}
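// Usage sketch: call addKey(volume, bucket, key) while creating keys, then
// use totalNumOfKeys() and getBucketKeys(bucket) to verify the bookkeeping.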
private int countOmKeys(OzoneManager om) throws IOException {
int totalCount = 0;
List<OmVolumeArgs> volumes =
om.listAllVolumes(null, null, Integer.MAX_VALUE);
for (OmVolumeArgs volume : volumes) {
List<OmBucketInfo> buckets =
om.listBuckets(volume.getVolume(), null, null, Integer.MAX_VALUE);
for (OmBucketInfo bucket : buckets) {
List<OmKeyInfo> keys = om.listKeys(bucket.getVolumeName(),
bucket.getBucketName(), null, null, Integer.MAX_VALUE);
totalCount += keys.size();
}
}
return totalCount;
}
@Test
@Ignore("Until delete background service is fixed.")
public void testDeleteKey() throws Exception {
OzoneManager ozoneManager = ozoneCluster.getOzoneManager();
// To avoid interference from other test cases,
// we record the number of pre-existing keys at the beginning
int numOfExistedKeys = countOmKeys(ozoneManager);
// Keep tracking bucket keys info while creating them
PutHelper helper = new PutHelper(client, path);
BucketKeys bucketKeys = new BucketKeys();
for (int i = 0; i < 20; i++) {
String keyName = helper.putKey();
bucketKeys.addKey(helper.getVol().getName(), helper.getBucket().getName(),
keyName);
}
// There should be 20 keys in the buckets we just created.
Assert.assertEquals(20, bucketKeys.totalNumOfKeys());
int numOfCreatedKeys = 0;
OzoneContainer cm = ozoneCluster.getHddsDatanodes().get(0)
.getDatanodeStateMachine().getContainer();
// Chunk files that are expected to be deleted.
List<File> expectedChunkFiles = Lists.newArrayList();
// Iterate over all buckets, and list all keys in each bucket,
// count the total number of created keys.
Set<Pair<String, String>> buckets = bucketKeys.getAllBuckets();
for (Pair<String, String> buk : buckets) {
List<OmKeyInfo> createdKeys =
ozoneManager.listKeys(buk.getKey(), buk.getValue(), null, null, 20);
// Remember the chunks that have been created,
// so we can verify the actual deletions on the DN side later.
for (OmKeyInfo keyInfo : createdKeys) {
List<OmKeyLocationInfo> locations =
keyInfo.getLatestVersionLocations().getLocationList();
OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(),
ozoneCluster.getStorageContainerManager());
for (OmKeyLocationInfo location : locations) {
KeyValueHandler keyValueHandler = (KeyValueHandler) cm
.getDispatcher().getHandler(ContainerProtos.ContainerType
.KeyValueContainer);
KeyValueContainer container = (KeyValueContainer) cm.getContainerSet()
.getContainer(location.getBlockID().getContainerID());
BlockData blockInfo = keyValueHandler.getBlockManager()
.getBlock(container, location.getBlockID());
KeyValueContainerData containerData =
(KeyValueContainerData) container.getContainerData();
File dataDir = new File(containerData.getChunksPath());
for (ContainerProtos.ChunkInfo chunkInfo : blockInfo.getChunks()) {
File chunkFile = dataDir.toPath()
.resolve(chunkInfo.getChunkName()).toFile();
System.out.println("Chunk File created: "
+ chunkFile.getAbsolutePath());
Assert.assertTrue(chunkFile.exists());
expectedChunkFiles.add(chunkFile);
}
}
}
numOfCreatedKeys += createdKeys.size();
}
// Ensure all keys are created.
Assert.assertEquals(20, numOfCreatedKeys);
// Ensure all keys are visible from OM.
// Total number should be numOfCreated + numOfExisted
Assert.assertEquals(20 + numOfExistedKeys, countOmKeys(ozoneManager));
// Delete all 20 keys just created.
int delCount = 20;
Set<Pair<String, String>> allBuckets = bucketKeys.getAllBuckets();
for (Pair<String, String> bucketInfo : allBuckets) {
List<String> bks = bucketKeys.getBucketKeys(bucketInfo.getValue());
for (String keyName : bks) {
if (delCount > 0) {
OmKeyArgs arg =
new OmKeyArgs.Builder().setVolumeName(bucketInfo.getKey())
.setBucketName(bucketInfo.getValue()).setKeyName(keyName)
.build();
ozoneManager.deleteKey(arg);
delCount--;
}
}
}
// Keys should disappear from the OM namespace quickly,
// because the actual block deletion happens asynchronously.
GenericTestUtils.waitFor(() -> {
try {
int num = countOmKeys(ozoneManager);
return num == (numOfExistedKeys);
} catch (IOException e) {
return false;
}
}, 1000, 10000);
// It might take a while until all blocks are actually deleted;
// verify that all chunk files created earlier are removed from disk.
GenericTestUtils.waitFor(
() -> expectedChunkFiles.stream().allMatch(file -> !file.exists()),
1000, 60000);
}
}

View File

@ -1,126 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.client;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.RatisTestHelper;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import static org.apache.hadoop.ozone.web.client
.TestKeys.PutHelper;
import static org.apache.hadoop.ozone.web.client
.TestKeys.getMultiPartKey;
import static org.apache.hadoop.ozone.web.client
.TestKeys.runTestGetKeyInfo;
import static org.apache.hadoop.ozone.web.client
.TestKeys.runTestPutAndDeleteKey;
import static org.apache.hadoop.ozone.web.client
.TestKeys.runTestPutAndGetKey;
import static org.apache.hadoop.ozone.web.client
.TestKeys.runTestPutAndGetKeyWithDnRestart;
import static org.apache.hadoop.ozone.web.client
.TestKeys.runTestPutAndListKey;
import static org.apache.hadoop.ozone.web.client
.TestKeys.runTestPutKey;
/** The same as {@link TestKeys} except that this test is Ratis enabled. */
public class TestKeysRatis {
@Rule
public Timeout testTimeout = new Timeout(300000);
private static RatisTestHelper.RatisTestSuite suite;
private static MiniOzoneCluster ozoneCluster = null;
private static String path;
private static ClientProtocol client = null;
@BeforeClass
public static void init() throws Exception {
suite = new RatisTestHelper.RatisTestSuite();
path = GenericTestUtils.getTempPath(TestKeysRatis.class.getSimpleName());
ozoneCluster = suite.getCluster();
ozoneCluster.waitForClusterToBeReady();
client = suite.newOzoneClient();
}
/**
* Shutdown the MiniOzoneCluster.
*/
@AfterClass
public static void shutdown() {
if (suite != null) {
suite.close();
}
}
@Test
public void testPutKey() throws Exception {
runTestPutKey(new PutHelper(client, path));
String delimiter = RandomStringUtils.randomAlphanumeric(1);
runTestPutKey(new PutHelper(client, path,
getMultiPartKey(delimiter)));
}
@Test
public void testPutAndGetKeyWithDnRestart() throws Exception {
runTestPutAndGetKeyWithDnRestart(
new PutHelper(client, path), ozoneCluster);
String delimiter = RandomStringUtils.randomAlphanumeric(1);
runTestPutAndGetKeyWithDnRestart(
new PutHelper(client, path, getMultiPartKey(delimiter)),
ozoneCluster);
}
@Test
public void testPutAndGetKey() throws Exception {
runTestPutAndGetKey(new PutHelper(client, path));
String delimiter = RandomStringUtils.randomAlphanumeric(1);
runTestPutAndGetKey(new PutHelper(client, path,
getMultiPartKey(delimiter)));
}
@Test
public void testPutAndDeleteKey() throws Exception {
runTestPutAndDeleteKey(new PutHelper(client, path));
String delimiter = RandomStringUtils.randomAlphanumeric(1);
runTestPutAndDeleteKey(new PutHelper(client, path,
getMultiPartKey(delimiter)));
}
@Test
public void testPutAndListKey() throws Exception {
runTestPutAndListKey(new PutHelper(client, path));
String delimiter = RandomStringUtils.randomAlphanumeric(1);
runTestPutAndListKey(new PutHelper(client, path,
getMultiPartKey(delimiter)));
}
@Test
public void testGetKeyInfo() throws Exception {
runTestGetKeyInfo(new PutHelper(client, path));
String delimiter = RandomStringUtils.randomAlphanumeric(1);
runTestGetKeyInfo(new PutHelper(client, path,
getMultiPartKey(delimiter)));
}
}

View File

@ -1,304 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.client;
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.codec.http.DefaultFullHttpRequest;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.HttpClientCodec;
import io.netty.handler.codec.http.HttpContent;
import io.netty.handler.codec.http.HttpContentDecompressor;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpObject;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpVersion;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.util.EntityUtils;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import javax.ws.rs.core.HttpHeaders;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URISyntaxException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;
import java.util.UUID;
import static io.netty.util.CharsetUtil.UTF_8;
/**
* Unit tests for Ozone client connection reuse with Apache HttpClient and Netty
* based HttpClient.
*/
public class TestOzoneClient {
private static Logger log = Logger.getLogger(TestOzoneClient.class);
private static int testVolumeCount = 5;
private static MiniOzoneCluster cluster = null;
private static String endpoint = null;
@BeforeClass
public static void init() throws Exception {
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.ALL);
OzoneConfiguration conf = new OzoneConfiguration();
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
int port = cluster.getHddsDatanodes().get(0)
.getDatanodeDetails()
.getPort(DatanodeDetails.Port.Name.REST).getValue();
endpoint = String.format("http://localhost:%d", port);
}
@AfterClass
public static void shutdown() {
if (cluster != null) {
cluster.shutdown();
}
}
@Test(timeout = 5000)
public void testNewConnectionPerRequest()
throws IOException, URISyntaxException {
for (int i = 0; i < testVolumeCount; i++) {
try (CloseableHttpClient httpClient =
HttpClients.createDefault()) {
createVolume(getRandomVolumeName(i), httpClient);
}
}
}
/**
* Object handler should be able to serve multiple requests from
* a single HTTP client. This allows the client side to reuse
* HTTP connections from a connection pool instead of creating a new
* connection per request, which is resource-heavy.
*
*/
@Test(timeout = 5000)
public void testReuseWithApacheHttpClient()
throws IOException, URISyntaxException {
PoolingHttpClientConnectionManager cm =
new PoolingHttpClientConnectionManager();
cm.setMaxTotal(200);
cm.setDefaultMaxPerRoute(20);
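// Pool limits: at most 200 open connections in total and 20 per route
// (host:port); every createVolume call below reuses this single pool.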
try (CloseableHttpClient httpClient =
HttpClients.custom().setConnectionManager(cm).build()) {
for (int i = 0; i < testVolumeCount; i++) {
createVolume(getRandomVolumeName(i), httpClient);
}
}
}
@Test(timeout = 10000)
public void testReuseWithNettyHttpClient()
throws IOException, InterruptedException, URISyntaxException {
URI uri = new URI(endpoint);
String host = uri.getHost() == null ? "127.0.0.1" : uri.getHost();
int port = uri.getPort();
EventLoopGroup workerGroup = new NioEventLoopGroup();
try {
Bootstrap b = new Bootstrap();
b.group(workerGroup)
.channel(NioSocketChannel.class)
.option(ChannelOption.SO_KEEPALIVE, true)
.option(ChannelOption.SO_REUSEADDR, true)
.handler(new ChannelInitializer<SocketChannel>() {
/**
* This method will be called once the {@link Channel} was
* registered. After the method returns this instance
* will be removed from the {@link ChannelPipeline}
* of the {@link Channel}.
*
* @param ch the {@link Channel} which was registered.
* @throws Exception is thrown if an error occurs.
* In that case the {@link Channel} will be closed.
*/
@Override
public void initChannel(SocketChannel ch) {
ChannelPipeline p = ch.pipeline();
// Comment out the following line if you don't want client-side HTTP tracing
p.addLast("log", new LoggingHandler(LogLevel.INFO));
p.addLast(new HttpClientCodec());
p.addLast(new HttpContentDecompressor());
p.addLast(new NettyHttpClientHandler());
}
});
Channel ch = b.connect(host, port).sync().channel();
for (int i = 0; i < testVolumeCount; i++) {
String volumeName = getRandomVolumeName(i);
try {
sendNettyCreateVolumeRequest(ch, volumeName);
Thread.sleep(1000);
} catch (Exception e) {
e.printStackTrace();
}
}
Thread.sleep(1000);
ch.close();
// Wait for the server to close the connection.
ch.closeFuture().sync();
} catch (Exception ex) {
log.error("Error received in client setup", ex);
} finally {
workerGroup.shutdownGracefully();
}
}
class NettyHttpClientHandler extends
SimpleChannelInboundHandler<HttpObject> {
@Override
public void channelRead0(ChannelHandlerContext ctx, HttpObject msg) {
if (msg instanceof HttpResponse) {
HttpResponse response = (HttpResponse) msg;
log.info("STATUS: " + response.getStatus());
log.info("VERSION: " + response.getProtocolVersion());
Assert.assertEquals(HttpResponseStatus.CREATED.code(),
response.getStatus().code());
}
if (msg instanceof HttpContent) {
HttpContent content = (HttpContent) msg;
log.info(content.content().toString(UTF_8));
if (content instanceof LastHttpContent) {
log.info("END OF CONTENT");
}
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
log.error("Exception upon channel read", cause);
ctx.close();
}
}
private String getRandomVolumeName(int index) {
UUID id = UUID.randomUUID();
return "test-volume-" + index + "-" + id;
}
// Prepare the HTTP request and send it over the netty channel.
private void sendNettyCreateVolumeRequest(Channel channel, String volumeName)
throws URISyntaxException, IOException {
URIBuilder builder = new URIBuilder(endpoint);
builder.setPath("/" + volumeName);
URI uri = builder.build();
String host = uri.getHost() == null ? "127.0.0.1" : uri.getHost();
FullHttpRequest request = new DefaultFullHttpRequest(
HttpVersion.HTTP_1_1, HttpMethod.POST, uri.getRawPath());
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
request.headers().set(HttpHeaders.HOST, host);
request.headers().add(HttpHeaders.CONTENT_TYPE, "application/json");
request.headers().set(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
request.headers().set(HttpHeaders.DATE,
format.format(new Date(Time.monotonicNow())));
request.headers().set(Header.OZONE_USER,
UserGroupInformation.getCurrentUser().getUserName());
request.headers().set(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " "
+ OzoneConsts.OZONE_SIMPLE_HDFS_USER);
// Send the HTTP request via netty channel.
channel.writeAndFlush(request);
}
// It is the caller's responsibility to close the client.
private void createVolume(String volumeName, CloseableHttpClient httpClient)
throws IOException, URISyntaxException {
HttpPost create1 =
getCreateVolumeRequest(volumeName);
HttpEntity entity = null;
try {
CloseableHttpResponse response1 =
httpClient.execute(create1);
Assert.assertEquals(HttpURLConnection.HTTP_CREATED,
response1.getStatusLine().getStatusCode());
entity = response1.getEntity();
} catch (IOException e) {
e.printStackTrace();
} finally {
EntityUtils.consumeQuietly(entity);
}
}
private HttpPost getCreateVolumeRequest(String volumeName)
throws URISyntaxException, IOException {
URIBuilder builder = new URIBuilder(endpoint);
builder.setPath("/" + volumeName);
HttpPost httpPost = new HttpPost(builder.build().toString());
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
httpPost.addHeader(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
httpPost.addHeader(HttpHeaders.DATE,
format.format(new Date(Time.monotonicNow())));
httpPost.addHeader(Header.OZONE_USER,
UserGroupInformation.getCurrentUser().getUserName());
httpPost.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " "
+ OzoneConsts.OZONE_SIMPLE_HDFS_USER);
return httpPost;
}
}

View File

@ -1,381 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.client;
import java.io.File;
import java.io.IOException;
import java.text.ParseException;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.hdds.client.OzoneQuota;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneTestUtils;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.VolumeArgs;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.client.rest.RestClient;
import org.apache.hadoop.ozone.client.rpc.RpcClient;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.AfterClass;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
/**
* Test Ozone Volumes Lifecycle.
*/
@RunWith(value = Parameterized.class)
public class TestVolume {
private static MiniOzoneCluster cluster = null;
private static ClientProtocol client = null;
private static OzoneConfiguration conf;
@Parameterized.Parameters
public static Collection<Object[]> clientProtocol() {
Object[][] params = new Object[][] {
{RpcClient.class}};
return Arrays.asList(params);
}
@SuppressWarnings("visibilitymodifier")
@Parameterized.Parameter
public Class clientProtocol;
/**
* Create a MiniOzoneCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@BeforeClass
public static void init() throws Exception {
conf = new OzoneConfiguration();
String path = GenericTestUtils
.getTempPath(TestVolume.class.getSimpleName());
FileUtils.deleteDirectory(new File(path));
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
}
@Before
public void setup() throws Exception {
if (clientProtocol.equals(RestClient.class)) {
client = new RestClient(conf);
} else {
client = new RpcClient(conf);
}
}
/**
* Shutdown the MiniOzoneCluster.
*/
@AfterClass
public static void shutdown() {
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testCreateVolume() throws Exception {
runTestCreateVolume(client);
}
static void runTestCreateVolume(ClientProtocol clientProtocol)
throws OzoneException, IOException, ParseException {
String volumeName = OzoneUtils.getRequestID().toLowerCase();
long currentTime = Time.now();
VolumeArgs volumeArgs = VolumeArgs.newBuilder()
.setOwner("bilbo")
.setQuota("100TB")
.setAdmin("hdfs")
.build();
clientProtocol.createVolume(volumeName, volumeArgs);
OzoneVolume vol = clientProtocol.getVolumeDetails(volumeName);
assertEquals(volumeName, vol.getName());
assertEquals("hdfs", vol.getAdmin());
assertEquals("bilbo", vol.getOwner());
assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(), vol.getQuota());
// verify the volume creation time
assertTrue((vol.getCreationTime()
/ 1000) >= (currentTime / 1000));
// Test creating a volume with an invalid volume name. We cannot use a
// JUnit Rule here because the test method is static.
try {
String invalidVolumeName = "#" + OzoneUtils.getRequestID().toLowerCase();
clientProtocol.createVolume(invalidVolumeName);
/*
//TODO: RestClient and RpcClient should use HddsClientUtils to verify name
fail("Except the volume creation be failed because the"
+ " volume name starts with an invalid char #");*/
} catch (Exception e) {
assertTrue(e.getMessage().contains("Bucket or Volume name"
+ " has an unsupported character : #"));
}
}
@Test
public void testCreateDuplicateVolume() throws Exception {
runTestCreateDuplicateVolume(client);
}
static void runTestCreateDuplicateVolume(ClientProtocol clientProtocol)
throws Exception {
clientProtocol.createVolume("testvol");
OzoneTestUtils.expectOmException(ResultCodes.VOLUME_ALREADY_EXISTS,
() -> clientProtocol.createVolume("testvol"));
}
@Test
public void testDeleteVolume() throws OzoneException, IOException {
runTestDeleteVolume(client);
}
static void runTestDeleteVolume(ClientProtocol clientProtocol)
throws OzoneException, IOException {
String volumeName = OzoneUtils.getRequestID().toLowerCase();
clientProtocol.createVolume(volumeName);
clientProtocol.deleteVolume(volumeName);
}
@Test
public void testChangeOwnerOnVolume() throws Exception {
runTestChangeOwnerOnVolume(client);
}
static void runTestChangeOwnerOnVolume(ClientProtocol clientProtocol)
throws OzoneException, ParseException, IOException {
String volumeName = OzoneUtils.getRequestID().toLowerCase();
clientProtocol.createVolume(volumeName);
clientProtocol.getVolumeDetails(volumeName);
clientProtocol.setVolumeOwner(volumeName, "frodo");
OzoneVolume newVol = clientProtocol.getVolumeDetails(volumeName);
assertEquals(newVol.getOwner(), "frodo");
// verify if the creation time is missing after setting owner operation
assertTrue(newVol.getCreationTime() > 0);
}
@Test
public void testChangeQuotaOnVolume() throws Exception {
runTestChangeQuotaOnVolume(client);
}
static void runTestChangeQuotaOnVolume(ClientProtocol clientProtocol)
throws OzoneException, IOException, ParseException {
String volumeName = OzoneUtils.getRequestID().toLowerCase();
clientProtocol.createVolume(volumeName);
clientProtocol.setVolumeQuota(volumeName, OzoneQuota.parseQuota("1000MB"));
OzoneVolume newVol = clientProtocol.getVolumeDetails(volumeName);
assertEquals(OzoneQuota.parseQuota("1000MB").sizeInBytes(),
newVol.getQuota());
// verify the creation time is preserved after the set-quota operation
assertTrue(newVol.getCreationTime() > 0);
}
// The "list all volumes in the cluster" feature has to be fixed after HDDS-357.
// TODO: fix this
@Ignore
@Test
public void testListVolume() throws OzoneException, IOException {
runTestListVolume(client);
}
static void runTestListVolume(ClientProtocol clientProtocol)
throws OzoneException, IOException {
for (int x = 0; x < 10; x++) {
String volumeName = OzoneUtils.getRequestID().toLowerCase();
clientProtocol.createVolume(volumeName);
}
List<OzoneVolume> ovols = clientProtocol.listVolumes(
UserGroupInformation.getCurrentUser().getUserName(), null, null, 100);
assertTrue(ovols.size() >= 10);
}
// TODO: remove @Ignore below once the problem has been resolved.
@Ignore("Takes 3m to run, disable for now.")
@Test
public void testListVolumePagination() throws OzoneException, IOException {
runTestListVolumePagination(client);
}
static void runTestListVolumePagination(ClientProtocol clientProtocol)
throws OzoneException, IOException {
final int volCount = 2000;
final int step = 100;
for (int x = 0; x < volCount; x++) {
String volumeName = OzoneUtils.getRequestID().toLowerCase();
clientProtocol.createVolume(volumeName);
}
String prevKey = null;
int count = 0;
int pagecount = 0;
while (count < volCount) {
List<OzoneVolume> ovols = clientProtocol.listVolumes(null, prevKey, step);
count += ovols.size();
prevKey = ovols.get(ovols.size() - 1).getName();
pagecount++;
}
assertEquals(volCount / step, pagecount);
}
// TODO: remove @Ignore below once the problem has been resolved.
@Ignore
@Test
public void testListAllVolumes() throws OzoneException, IOException {
runTestListAllVolumes(client);
}
static void runTestListAllVolumes(ClientProtocol clientProtocol)
throws OzoneException, IOException {
final int volCount = 200;
final int step = 10;
for (int x = 0; x < volCount; x++) {
String userName =
"frodo" + RandomStringUtils.randomAlphabetic(5).toLowerCase();
String volumeName =
"vol" + RandomStringUtils.randomAlphabetic(5).toLowerCase();
VolumeArgs volumeArgs = VolumeArgs.newBuilder()
.setOwner(userName)
.setQuota("100TB")
.setAdmin("hdfs")
.build();
clientProtocol.createVolume(volumeName, volumeArgs);
OzoneVolume vol = clientProtocol.getVolumeDetails(volumeName);
assertNotNull(vol);
}
String prevKey = null;
int count = 0;
int pagecount = 0;
while (count < volCount) {
List<OzoneVolume> ovols = clientProtocol.listVolumes(null, prevKey, step);
count += ovols.size();
if (ovols.size() > 0) {
prevKey = ovols.get(ovols.size() - 1).getName();
}
pagecount++;
}
// Because we are querying an existing ozone store, there may also be
// volumes created by other tests, so more pages may be seen.
assertEquals(volCount / step, pagecount);
}
// The "list all volumes in the cluster" feature has to be fixed after HDDS-357.
// TODO: fix this
@Ignore
@Test
public void testListVolumes() throws Exception {
runTestListVolumes(client);
}
static void runTestListVolumes(ClientProtocol clientProtocol)
throws OzoneException, IOException, ParseException {
final int volCount = 20;
final String user1 = "test-user-a";
final String user2 = "test-user-b";
long currentTime = Time.now();
// Create 20 volumes, 10 for user1 and another 10 for user2.
for (int x = 0; x < volCount; x++) {
String volumeName;
String userName;
if (x % 2 == 0) {
// create volume [test-vol0, test-vol2, ..., test-vol18] for user1
userName = user1;
volumeName = "test-vol" + x;
} else {
// create volume [test-vol1, test-vol3, ..., test-vol19] for user2
userName = user2;
volumeName = "test-vol" + x;
}
VolumeArgs volumeArgs = VolumeArgs.newBuilder()
.setOwner(userName)
.setQuota("100TB")
.setAdmin("hdfs")
.build();
clientProtocol.createVolume(volumeName, volumeArgs);
OzoneVolume vol = clientProtocol.getVolumeDetails(volumeName);
assertNotNull(vol);
}
// list all the volumes belonging to user1
List<OzoneVolume> volumeList =
clientProtocol.listVolumes(user1, null, null, 100);
assertEquals(10, volumeList.size());
// verify the owner name and creation time of volume
for (OzoneVolume vol : volumeList) {
assertTrue(vol.getOwner().equals(user1));
assertTrue((vol.getCreationTime()
/ 1000) >= (currentTime / 1000));
}
// test max key parameter of listing volumes
volumeList = clientProtocol.listVolumes(user1, null, null, 2);
assertEquals(2, volumeList.size());
// test prefix parameter of listing volumes
volumeList = clientProtocol.listVolumes(user1, "test-vol10", null, 10);
assertTrue(volumeList.size() == 1
&& volumeList.get(0).getName().equals("test-vol10"));
volumeList = clientProtocol.listVolumes(user1, "test-vol1", null, 10);
assertEquals(5, volumeList.size());
// test start key parameter of listing volumes
volumeList = clientProtocol.listVolumes(user2, null, "test-vol15", 10);
assertEquals(2, volumeList.size());
String volumeName;
for (int x = 0; x < volCount; x++) {
volumeName = "test-vol" + x;
clientProtocol.deleteVolume(volumeName);
}
}
}

View File

@ -1,155 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.client;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.client.rest.RestClient;
import org.apache.hadoop.ozone.client.rpc.RpcClient;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.*;
import org.junit.rules.Timeout;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
/** The same as {@link TestVolume} except that this test is Ratis enabled. */
@Ignore("Disabling Ratis tests for pipeline work.")
@RunWith(value = Parameterized.class)
public class TestVolumeRatis {
@Rule
public Timeout testTimeout = new Timeout(300000);
private static ClientProtocol client;
private static MiniOzoneCluster cluster;
private static OzoneConfiguration conf;
@Parameterized.Parameters
public static Collection<Object[]> clientProtocol() {
Object[][] params = new Object[][] {
{RpcClient.class},
{RestClient.class}};
return Arrays.asList(params);
}
@Parameterized.Parameter
@SuppressWarnings("visibilitymodifier")
public Class clientProtocol;
@BeforeClass
public static void init() throws Exception {
conf = new OzoneConfiguration();
// This enables Ratis in the cluster.
conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true);
String path = GenericTestUtils
.getTempPath(TestVolume.class.getSimpleName());
FileUtils.deleteDirectory(new File(path));
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
cluster.waitForClusterToBeReady();
final int port = cluster.getHddsDatanodes().get(0)
.getDatanodeDetails()
.getPort(DatanodeDetails.Port.Name.REST).getValue();
}
@Before
public void setup() throws Exception {
if (clientProtocol.equals(RestClient.class)) {
client = new RestClient(conf);
} else {
client = new RpcClient(conf);
}
}
@AfterClass
public static void shutdown() {
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testCreateVolume() throws Exception {
TestVolume.runTestCreateVolume(client);
}
@Test
public void testCreateDuplicateVolume() throws Exception {
TestVolume.runTestCreateDuplicateVolume(client);
}
@Test
public void testDeleteVolume() throws OzoneException, IOException {
TestVolume.runTestDeleteVolume(client);
}
@Test
public void testChangeOwnerOnVolume() throws Exception {
TestVolume.runTestChangeOwnerOnVolume(client);
}
@Test
public void testChangeQuotaOnVolume() throws Exception {
TestVolume.runTestChangeQuotaOnVolume(client);
}
// TODO: remove @Ignore below once the problem has been resolved.
@Ignore("listVolumes not implemented in DistributedStorageHandler")
@Test
public void testListVolume() throws OzoneException, IOException {
TestVolume.runTestListVolume(client);
}
// TODO: remove @Ignore below once the problem has been resolved.
@Ignore("See TestVolume.testListVolumePagination()")
@Test
public void testListVolumePagination() throws OzoneException, IOException {
TestVolume.runTestListVolumePagination(client);
}
// TODO: remove @Ignore below once the problem has been resolved.
@Ignore("See TestVolume.testListAllVolumes()")
@Test
public void testListAllVolumes() throws Exception {
TestVolume.runTestListAllVolumes(client);
}
@Ignore("Disabling Ratis tests for pipeline work.")
@Test
public void testListVolumes() throws Exception {
TestVolume.runTestListVolumes(client);
}
}

View File

@ -1,22 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* REST client tests.
*/
package org.apache.hadoop.ozone.web.client;

View File

@ -1,22 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Rest Client Tests.
*/
package org.apache.hadoop.ozone.web;

View File

@ -1,126 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone</artifactId>
<version>0.5.0-SNAPSHOT</version>
</parent>
<artifactId>hadoop-ozone-objectstore-service</artifactId>
<version>0.5.0-SNAPSHOT</version>
<description>Apache Hadoop Ozone Object Store REST Service</description>
<name>Apache Hadoop Ozone Object Store REST Service</name>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-common</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-client</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-client</artifactId>
<scope>provided</scope>
<exclusions>
<exclusion>
<groupId>com.squareup.okhttp</groupId>
<artifactId>okhttp</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.swagger</groupId>
<artifactId>swagger-annotations</artifactId>
<version>1.5.9</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>2.2.0</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>3.1.1</version>
<configuration>
<shadedArtifactAttached>true</shadedArtifactAttached>
<shadedClassifierName>plugin</shadedClassifierName>
<artifactSet>
<includes>
<!-- artifacts with provided scope will be excluded anyway -->
<include>*:*:*:*</include>
</includes>
</artifactSet>
<filters>
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>META-INF/*.SF</exclude>
<exclude>META-INF/*.DSA</exclude>
<exclude>META-INF/*.RSA</exclude>
</excludes>
</filter>
</filters>
</configuration>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -1,160 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
import org.apache.hadoop.hdds.tracing.TracingUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
import org.apache.hadoop.ozone.web.ObjectStoreApplication;
import org.apache.hadoop.ozone.web.handlers.ServiceFilter;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainer;
import org.apache.hadoop.ozone.web.storage.DistributedStorageHandler;
import org.apache.hadoop.security.UserGroupInformation;
import com.sun.jersey.api.container.ContainerFactory;
import com.sun.jersey.api.core.ApplicationAdapter;
import static com.sun.jersey.api.core.ResourceConfig.FEATURE_TRACE;
import static com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS;
import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRACE_ENABLED_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY;
import org.apache.ratis.protocol.ClientId;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Implements object store handling within the DataNode process. This class is
* responsible for initializing and maintaining the RPC clients and servers and
* the web application required for the object store implementation.
*/
public final class ObjectStoreHandler implements Closeable {
private static final Logger LOG =
LoggerFactory.getLogger(ObjectStoreHandler.class);
private final ObjectStoreJerseyContainer objectStoreJerseyContainer;
private final OzoneManagerProtocol ozoneManagerClient;
private final StorageContainerLocationProtocol
storageContainerLocationClient;
private final StorageHandler storageHandler;
private ClientId clientId = ClientId.randomId();
/**
* Creates a new ObjectStoreHandler.
*
* @param conf configuration
* @throws IOException if there is an I/O error
*/
public ObjectStoreHandler(Configuration conf) throws IOException {
boolean ozoneTrace = conf.getBoolean(OZONE_TRACE_ENABLED_KEY,
OZONE_TRACE_ENABLED_DEFAULT);
// Initialize Jersey container for object store web application.
RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
ProtobufRpcEngine.class);
long scmVersion =
RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
InetSocketAddress scmAddress =
getScmAddressForClients(conf);
this.storageContainerLocationClient =
TracingUtil.createProxy(
new StorageContainerLocationProtocolClientSideTranslatorPB(
RPC.getProxy(StorageContainerLocationProtocolPB.class,
scmVersion,
scmAddress, UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf),
Client.getRpcTimeout(conf))),
StorageContainerLocationProtocol.class, conf);
RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
ProtobufRpcEngine.class);
long omVersion =
RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
InetSocketAddress omAddress = getOmAddress(conf);
this.ozoneManagerClient =
TracingUtil.createProxy(
new OzoneManagerProtocolClientSideTranslatorPB(
RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
omAddress, UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf),
Client.getRpcTimeout(conf)), clientId.toString()),
OzoneManagerProtocol.class, conf);
storageHandler = new DistributedStorageHandler(
new OzoneConfiguration(conf),
TracingUtil.createProxy(storageContainerLocationClient,
StorageContainerLocationProtocol.class, conf),
this.ozoneManagerClient);
ApplicationAdapter aa =
new ApplicationAdapter(new ObjectStoreApplication());
Map<String, Object> settingsMap = new HashMap<>();
settingsMap.put(PROPERTY_CONTAINER_REQUEST_FILTERS,
ServiceFilter.class.getCanonicalName());
settingsMap.put(FEATURE_TRACE, ozoneTrace);
aa.setPropertiesAndFeatures(settingsMap);
this.objectStoreJerseyContainer = ContainerFactory.createContainer(
ObjectStoreJerseyContainer.class, aa);
this.objectStoreJerseyContainer.setStorageHandler(storageHandler);
}
/**
* Returns the initialized web application container.
*
* @return initialized web application container
*/
public ObjectStoreJerseyContainer getObjectStoreJerseyContainer() {
return this.objectStoreJerseyContainer;
}
/**
* Returns the storage handler.
*
* @return returns the storage handler
*/
public StorageHandler getStorageHandler() {
return this.storageHandler;
}
@Override
public void close() {
LOG.info("Closing ObjectStoreHandler.");
storageHandler.close();
IOUtils.cleanupWithLogger(LOG, storageContainerLocationClient);
IOUtils.cleanupWithLogger(LOG, ozoneManagerClient);
}
}
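For context, the deleted handler owned three resources (two RPC proxies and the Jersey container) and was driven roughly like this; a sketch, with conf standing in for the datanode's Configuration:

ObjectStoreHandler handler = new ObjectStoreHandler(conf);  // throws IOException
try {
  // Hand the Jersey container to the Netty-based REST server, as
  // OzoneHddsDatanodeService below does.
  ObjectStoreJerseyContainer container =
      handler.getObjectStoreJerseyContainer();
  // ... serve requests ...
} finally {
  handler.close();  // closes the storage handler and both RPC proxies
}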

View File

@ -1,22 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
/**
* Object store related service inside the datanode.
*/

View File

@ -1,222 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Request;
import javax.ws.rs.core.Response;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import java.util.Locale;
import java.util.TimeZone;
import java.util.UUID;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.ozone.client.io.LengthInputStream;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.web.handlers.UserArgs;
import org.apache.hadoop.util.Time;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Set of Utility functions used in ozone.
*/
@InterfaceAudience.Private
public final class OzoneRestUtils {
private static final Logger LOG = LoggerFactory.getLogger(
OzoneRestUtils.class);
private OzoneRestUtils() {
// Never constructed
}
/**
* Date format used in Ozone. The ThreadLocal wrapper makes it thread-safe.
*/
private static final ThreadLocal<SimpleDateFormat> DATE_FORMAT =
new ThreadLocal<SimpleDateFormat>() {
@Override
protected SimpleDateFormat initialValue() {
SimpleDateFormat format = new SimpleDateFormat(
OzoneConsts.OZONE_DATE_FORMAT, Locale.US);
format.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE));
return format;
}
};
/**
* Verifies that a bucket name / volume name is a valid DNS name.
*
* @param resName Bucket or volume Name to be validated
*
* @throws IllegalArgumentException
*/
public static void verifyResourceName(String resName)
throws IllegalArgumentException {
HddsClientUtils.verifyResourceName(resName);
}
/**
* Returns a random Request ID.
*
* The request ID is returned to the client and also flows through the
* system, facilitating debugging of why a certain request failed.
*
* @return String random request ID
*/
public static String getRequestID() {
return UUID.randomUUID().toString();
}
/**
* Basic validate routine to make sure that all the
* required headers are in place.
*
* @param request - http request
* @param headers - http headers
* @param reqId - request id
* @param resource - Resource Name
* @param hostname - Hostname
*
* @throws OzoneException
*/
public static void validate(Request request, HttpHeaders headers,
String reqId, String resource, String hostname)
throws OzoneException {
List<String> ozHeader =
headers.getRequestHeader(Header.OZONE_VERSION_HEADER);
if (ozHeader == null) {
throw ErrorTable
.newError(ErrorTable.MISSING_VERSION, reqId, resource, hostname);
}
List<String> date = headers.getRequestHeader(HttpHeaders.DATE);
if (date == null) {
throw ErrorTable
.newError(ErrorTable.MISSING_DATE, reqId, resource, hostname);
}
/*
TODO :
Ignore the results for time being. Eventually we can validate if the
request Date time is too skewed and reject if it is so.
*/
parseDate(date.get(0), reqId, resource, hostname);
}
/**
* Parses the Date String coming from the Users.
*
* @param dateString - Date String
* @param reqID - Ozone Request ID
* @param resource - Resource Name
* @param hostname - HostName
*
* @return - Date
*
* @throws OzoneException - in case of parsing error
*/
public static synchronized Date parseDate(String dateString, String reqID,
String resource, String hostname)
throws OzoneException {
try {
return DATE_FORMAT.get().parse(dateString);
} catch (ParseException ex) {
OzoneException exp =
ErrorTable.newError(ErrorTable.BAD_DATE, reqID, resource, hostname);
exp.setMessage(ex.getMessage());
throw exp;
}
}
/**
* Returns a response with appropriate OZONE headers and payload.
*
* @param args - UserArgs or Inherited class
* @param statusCode - HttpStatus code
* @param payload - Content Body
*
* @return JAX-RS Response
*/
public static Response getResponse(UserArgs args, int statusCode,
String payload) {
String date = DATE_FORMAT.get().format(new Date(Time.now()));
return Response.ok(payload)
.header(Header.OZONE_SERVER_NAME, args.getHostName())
.header(Header.OZONE_REQUEST_ID, args.getRequestID())
.header(HttpHeaders.DATE, date).status(statusCode).build();
}
/**
* Returns a response with appropriate OZONE headers and payload.
*
* @param args - UserArgs or Inherited class
* @param statusCode - HttpStatus code
* @param stream InputStream
*
* @return JAX-RS Response
*/
public static Response getResponse(UserArgs args, int statusCode,
LengthInputStream stream) {
String date = DATE_FORMAT.get().format(new Date(Time.now()));
return Response.ok(stream, MediaType.APPLICATION_OCTET_STREAM)
.header(Header.OZONE_SERVER_NAME, args.getHostName())
.header(Header.OZONE_REQUEST_ID, args.getRequestID())
.header(HttpHeaders.DATE, date).status(statusCode)
.header(HttpHeaders.CONTENT_LENGTH, stream.getLength())
.build();
}
/**
* Converts a time in milliseconds to the human-readable format required in Ozone.
* @return a human readable string for the input time
*/
public static String formatTime(long millis) {
return DATE_FORMAT.get().format(millis);
}
/**
* Converts a time in the Ozone date format to milliseconds.
* @return time in milliseconds
*/
public static long formatDate(String date) throws ParseException {
Preconditions.checkNotNull(date, "Date string should not be null.");
return DATE_FORMAT.get().parse(date).getTime();
}
}
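formatTime and formatDate above are inverses over the shared thread-local format; a quick sketch (inside a method that declares ParseException):

long now = Time.now();
String text = OzoneRestUtils.formatTime(now);   // millis -> Ozone date string
long back = OzoneRestUtils.formatDate(text);    // Ozone date string -> millis
// back equals now, up to the resolution of OZONE_DATE_FORMAT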

View File

@ -1,22 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone;
/**
* Ozone related generic classes.
*/

View File

@ -1,59 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web;
import org.apache.hadoop.ozone.client.rest.OzoneExceptionMapper;
import org.apache.hadoop.ozone.web.handlers.BucketHandler;
import org.apache.hadoop.ozone.web.handlers.KeyHandler;
import org.apache.hadoop.ozone.web.handlers.ServiceFilter;
import org.apache.hadoop.ozone.web.handlers.VolumeHandler;
import org.apache.hadoop.ozone.web.messages.LengthInputStreamMessageBodyWriter;
import org.apache.hadoop.ozone.web.messages.StringMessageBodyWriter;
import javax.ws.rs.core.Application;
import java.util.HashSet;
import java.util.Set;
/**
* Ozone Application.
*/
public class ObjectStoreApplication extends Application {
public ObjectStoreApplication() {
super();
}
@Override
public Set<Class<?>> getClasses() {
HashSet<Class<?>> set = new HashSet<>();
set.add(BucketHandler.class);
set.add(VolumeHandler.class);
set.add(KeyHandler.class);
set.add(OzoneExceptionMapper.class);
set.add(LengthInputStreamMessageBodyWriter.class);
set.add(StringMessageBodyWriter.class);
return set;
}
@Override
public Set<Object> getSingletons() {
HashSet<Object> set = new HashSet<>();
set.add(ServiceFilter.class);
return set;
}
}

View File

@ -1,89 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.ozone.web;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.web.netty.ObjectStoreRestHttpServer;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.util.ServicePlugin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* DataNode service plugin implementation to start ObjectStore rest server.
*/
public class OzoneHddsDatanodeService implements ServicePlugin {
private static final Logger LOG =
LoggerFactory.getLogger(OzoneHddsDatanodeService.class);
private Configuration conf;
private ObjectStoreHandler handler;
private ObjectStoreRestHttpServer objectStoreRestHttpServer;
@Override
public void start(Object service) {
if (service instanceof HddsDatanodeService) {
try {
HddsDatanodeService hddsDatanodeService = (HddsDatanodeService) service;
conf = hddsDatanodeService.getConf();
handler = new ObjectStoreHandler(conf);
objectStoreRestHttpServer = new ObjectStoreRestHttpServer(
conf, null, handler);
objectStoreRestHttpServer.start();
DatanodeDetails.Port restPort = DatanodeDetails.newPort(
DatanodeDetails.Port.Name.REST,
objectStoreRestHttpServer.getHttpAddress().getPort());
hddsDatanodeService.getDatanodeDetails().setPort(restPort);
} catch (IOException e) {
throw new RuntimeException("Can't start the Object Store Rest server",
e);
}
} else {
LOG.error("Not starting {}, as the plugin is not invoked through {}",
OzoneHddsDatanodeService.class.getSimpleName(),
HddsDatanodeService.class.getSimpleName());
}
}
@Override
public void stop() {
try {
if (handler != null) {
handler.close();
}
} catch (Exception e) {
throw new RuntimeException("Can't stop the Object Store Rest server", e);
}
}
@Override
public void close() {
IOUtils.closeQuietly(objectStoreRestHttpServer);
IOUtils.closeQuietly(handler);
}
}
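The class above follows the generic ServicePlugin contract; a hypothetical driver is sketched below (in the real datanode, plugins are loaded reflectively from configuration rather than constructed by hand):

ServicePlugin plugin = new OzoneHddsDatanodeService();
plugin.start(hddsDatanodeService);  // must be the running HddsDatanodeService
// ... the REST port is now registered on the datanode's DatanodeDetails ...
plugin.stop();   // closes the ObjectStoreHandler
plugin.close();  // closes the REST HTTP server and the handler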

View File

@ -1,225 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.exceptions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.web.handlers.UserArgs;
import static java.net.HttpURLConnection.HTTP_BAD_REQUEST;
import static java.net.HttpURLConnection.HTTP_CONFLICT;
import static java.net.HttpURLConnection.HTTP_FORBIDDEN;
import static java.net.HttpURLConnection.HTTP_INTERNAL_ERROR;
import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
import static java.net.HttpURLConnection.HTTP_UNAUTHORIZED;
/**
* Error Table represents the errors from the Ozone REST API layer.
*
* Please note: the errors in this table are sorted by HTTP error code;
* if you add new error codes to this table, please follow the same convention.
*/
@InterfaceAudience.Private
public final class ErrorTable {
/* Error 400 */
public static final OzoneException MISSING_VERSION =
new OzoneException(HTTP_BAD_REQUEST, "missingVersion",
"x-ozone-version header is required.");
public static final OzoneException MISSING_DATE =
new OzoneException(HTTP_BAD_REQUEST, "missingDate",
"Date header is required.");
public static final OzoneException BAD_DATE =
new OzoneException(HTTP_BAD_REQUEST, "badDate",
"Unable to parse date format.");
public static final OzoneException MALFORMED_QUOTA =
new OzoneException(HTTP_BAD_REQUEST, "malformedQuota",
"Invalid quota specified.");
public static final OzoneException MALFORMED_ACL =
new OzoneException(HTTP_BAD_REQUEST, "malformedACL",
"Invalid ACL specified.");
public static final OzoneException INVALID_VOLUME_NAME =
new OzoneException(HTTP_BAD_REQUEST, "invalidVolumeName",
"Invalid volume name.");
public static final OzoneException INVALID_QUERY_PARAM =
new OzoneException(HTTP_BAD_REQUEST, "invalidQueryParam",
"Invalid query parameter.");
public static final OzoneException INVALID_RESOURCE_NAME =
new OzoneException(HTTP_BAD_REQUEST, "invalidResourceName",
"Invalid volume, bucket or key name.");
public static final OzoneException INVALID_BUCKET_NAME =
new OzoneException(HTTP_BAD_REQUEST, "invalidBucketName",
"Invalid bucket name.");
public static final OzoneException INVALID_KEY =
new OzoneException(HTTP_BAD_REQUEST, "invalidKey", "Invalid key.");
public static final OzoneException INVALID_REQUEST =
new OzoneException(HTTP_BAD_REQUEST, "invalidRequest",
"Error in request.");
public static final OzoneException MALFORMED_BUCKET_VERSION =
new OzoneException(HTTP_BAD_REQUEST, "malformedBucketVersion",
"Malformed bucket version or version not unique.");
public static final OzoneException MALFORMED_STORAGE_TYPE =
new OzoneException(HTTP_BAD_REQUEST, "malformedStorageType",
"Invalid storage Type specified.");
public static final OzoneException MALFORMED_STORAGE_CLASS =
new OzoneException(HTTP_BAD_REQUEST, "malformedStorageClass",
"Invalid storage class specified.");
public static final OzoneException BAD_DIGEST =
new OzoneException(HTTP_BAD_REQUEST, "badDigest",
"Content MD5 does not match.");
public static final OzoneException INCOMPLETE_BODY =
new OzoneException(HTTP_BAD_REQUEST, "incompleteBody",
"Content length does not match stream size.");
public static final OzoneException BAD_AUTHORIZATION =
new OzoneException(HTTP_BAD_REQUEST, "badAuthorization",
"Missing authorization or authorization has to be " +
"unique.");
public static final OzoneException BAD_PROPERTY =
new OzoneException(HTTP_BAD_REQUEST, "unknownProperty",
"This property is not supported by this server.");
/* Error 401 */
public static final OzoneException UNAUTHORIZED =
new OzoneException(HTTP_UNAUTHORIZED, "Unauthorized",
"Access token is missing or invalid token.");
/* Error 403 */
public static final OzoneException ACCESS_DENIED =
new OzoneException(HTTP_FORBIDDEN, "accessDenied", "Access denied.");
/* Error 404 */
public static final OzoneException USER_NOT_FOUND =
new OzoneException(HTTP_NOT_FOUND, "userNotFound", "Invalid user name.");
public static final OzoneException VOLUME_NOT_FOUND =
new OzoneException(HTTP_NOT_FOUND, "volumeNotFound", "No such volume.");
/* Error 409 */
public static final OzoneException VOLUME_ALREADY_EXISTS =
new OzoneException(HTTP_CONFLICT, "volumeAlreadyExists",
"Duplicate volume name.");
public static final OzoneException BUCKET_ALREADY_EXISTS =
new OzoneException(HTTP_CONFLICT, "bucketAlreadyExists",
"Duplicate bucket name.");
public static final OzoneException VOLUME_NOT_EMPTY =
new OzoneException(HTTP_CONFLICT, "volumeNotEmpty",
"Volume must not have any buckets.");
public static final OzoneException BUCKET_NOT_EMPTY =
new OzoneException(HTTP_CONFLICT, "bucketNotEmpty",
"Bucket must not have any keys.");
public static final OzoneException KEY_OPERATION_CONFLICT =
new OzoneException(HTTP_CONFLICT, "keyOperationConflict",
"Conflicting operation on the specified key is going" +
" on.");
/* Error 500 */
public static final OzoneException SERVER_ERROR =
new OzoneException(HTTP_INTERNAL_ERROR, "internalServerError",
"Internal server error.");
/**
* Create a new instance of Error.
*
* @param e Error Template
* @param requestID Request ID
* @param resource Resource Name
* @param hostID hostID
*
* @return creates a new instance of error based on the template
*/
public static OzoneException newError(OzoneException e, String requestID,
String resource, String hostID) {
OzoneException err =
new OzoneException(e.getHttpCode(), e.getShortMessage(),
e.getMessage());
err.setRequestId(requestID);
err.setResource(resource);
err.setHostID(hostID);
return err;
}
/**
* Create new instance of Error.
*
* @param e - Error Template
* @param args - Args
*
* @return Ozone Exception
*/
public static OzoneException newError(OzoneException e, UserArgs args) {
OzoneException err =
new OzoneException(e.getHttpCode(), e.getShortMessage(),
e.getMessage());
if (args != null) {
err.setRequestId(args.getRequestID());
err.setResource(args.getResourceName());
err.setHostID(args.getHostName());
}
return err;
}
/**
* Create new instance of Error.
*
* @param e - Error Template
* @param args - Args
* @param ex Exception
*
* @return Ozone Exception
*/
public static OzoneException newError(OzoneException e, UserArgs args,
Exception ex) {
OzoneException err =
new OzoneException(e.getHttpCode(), e.getShortMessage(), ex);
if(args != null) {
err.setRequestId(args.getRequestID());
err.setResource(args.getResourceName());
err.setHostID(args.getHostName());
}
err.setMessage(ex.getMessage());
return err;
}
private ErrorTable() {
// Never constructed.
}
}
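Call sites in this diff never throw the templates directly; they copy them through newError, which stamps request metadata onto the copy. A minimal sketch (requestId and hostName are caller-supplied values):

OzoneException err = ErrorTable.newError(
    ErrorTable.INVALID_BUCKET_NAME, requestId, "volume/bucket", hostName);
err.setMessage("bucket names must be DNS compatible");  // optional detail
throw err;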

View File

@ -1,22 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.exceptions;
/**
This package contains the exceptions and error table used by the Ozone REST layer.
*/

View File

@ -1,190 +0,0 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.ozone.web.handlers;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.OzoneRestUtils;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.web.interfaces.Bucket;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.slf4j.MDC;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Request;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import static java.net.HttpURLConnection.HTTP_CREATED;
import static java.net.HttpURLConnection.HTTP_OK;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_FUNCTION;
/**
* Bucket Class handles all ozone Bucket related actions.
*/
public class BucketHandler implements Bucket {
/**
* createBucket call handles the POST request for Creating a Bucket.
*
* @param volume - Volume name
* @param bucket - Bucket Name
* @param req - Http request
* @param info - Uri Info
* @param headers - Http headers
*
* @return Response
*
* @throws OzoneException
*/
@Override
public Response createBucket(String volume, String bucket, Request req,
UriInfo info, HttpHeaders headers)
throws OzoneException {
MDC.put(OZONE_FUNCTION, "createBucket");
return new BucketProcessTemplate() {
@Override
public Response doProcess(BucketArgs args)
throws OzoneException, IOException {
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
args.setVersioning(getVersioning(args));
args.setStorageType(getStorageType(args));
fs.createBucket(args);
return OzoneRestUtils.getResponse(args, HTTP_CREATED, "");
}
}.handleCall(volume, bucket, req, info, headers);
}
/**
* updateBucket call handles the PUT request for updating a Bucket.
*
* There are only three possible actions currently with updateBucket.
* They are add/remove on ACLs, bucket versioning and storage type.
* If you make a call with any other action, update just returns 200 OK.
*
* @param volume - Storage volume name
* @param bucket - Bucket name
* @param req - Http request
* @param info - Uri Info
* @param headers - Http headers
*
* @return Response
*
* @throws OzoneException
*/
@Override
public Response updateBucket(String volume, String bucket, Request req,
UriInfo info, HttpHeaders headers)
throws OzoneException {
MDC.put(OZONE_FUNCTION, "updateBucket");
return new BucketProcessTemplate() {
@Override
public Response doProcess(BucketArgs args)
throws OzoneException, IOException {
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
args.setVersioning(getVersioning(args));
args.setStorageType(getStorageType(args));
if (args.getVersioning() != OzoneConsts.Versioning.NOT_DEFINED) {
fs.setBucketVersioning(args);
}
if (args.getStorageType() != null) {
fs.setBucketStorageClass(args);
}
return OzoneRestUtils.getResponse(args, HTTP_OK, "");
}
}.handleCall(volume, bucket, req, info, headers);
}
/**
* Deletes an empty bucket.
*
* @param volume Volume name
* @param bucket Bucket Name
* @param req - Http request
* @param info - Uri Info
* @param headers - Http headers
*
* @return Response
*
* @throws OzoneException
*/
@Override
public Response deleteBucket(String volume, String bucket, Request req,
UriInfo info, HttpHeaders headers)
throws OzoneException {
MDC.put(OZONE_FUNCTION, "deleteBucket");
return new BucketProcessTemplate() {
@Override
public Response doProcess(BucketArgs args)
throws OzoneException, IOException {
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
fs.deleteBucket(args);
return OzoneRestUtils.getResponse(args, HTTP_OK, "");
}
}.handleCall(volume, bucket, req, info, headers);
}
/**
* listBucket handles the GET request and either lists the keys in the
* bucket or returns the bucket's metadata, depending on the info query
* parameter.
*
* @param volume - Storage Volume Name
* @param bucket - Bucket Name
* @param info - Query selector (key listing or bucket info)
* @param prefix - Prefix for the keys to be fetched
* @param maxKeys - Maximum number of keys to return
* @param startPage - Continuation Token
* @param req - Http request
* @param uriInfo - Uri Info
* @param headers - Http headers
*
* @return - Json Body
*
* @throws OzoneException
*/
@Override
public Response listBucket(String volume, String bucket, final String info,
final String prefix, final int maxKeys,
final String startPage, Request req,
UriInfo uriInfo, HttpHeaders headers)
throws OzoneException {
MDC.put(OZONE_FUNCTION, "listBucket");
return new BucketProcessTemplate() {
@Override
public Response doProcess(BucketArgs args)
throws OzoneException, IOException {
switch (info) {
case Header.OZONE_INFO_QUERY_KEY:
ListArgs listArgs = new ListArgs(args, prefix, maxKeys, startPage);
return getBucketKeysList(listArgs);
case Header.OZONE_INFO_QUERY_BUCKET:
return getBucketInfoResponse(args);
default:
OzoneException ozException =
ErrorTable.newError(ErrorTable.INVALID_QUERY_PARAM, args);
ozException.setMessage("Unrecognized query param : " + info);
throw ozException;
}
}
}.handleCall(volume, bucket, req, uriInfo, headers);
}
}

View File

@ -1,294 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.handlers;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Request;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.NoSuchFileException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.OzoneRestUtils;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.interfaces.UserAuth;
import org.apache.hadoop.ozone.web.response.BucketInfo;
import org.apache.hadoop.ozone.web.response.ListKeys;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import static java.net.HttpURLConnection.HTTP_OK;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_COMPONENT;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_REQUEST;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RESOURCE;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_USER;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
/**
* This class abstracts away the repetitive tasks in
* Bucket handling code.
*/
public abstract class BucketProcessTemplate {
private static final Logger LOG =
LoggerFactory.getLogger(BucketProcessTemplate.class);
/**
* This function serves as the common error handling function
* for all bucket related operations.
*
* @param volume - Volume Name
* @param bucket - Bucket Name
* @param request - Http Request
* @param uriInfo - Http Uri
* @param headers - Http Headers
*
* @return Response
*
* @throws OzoneException
*/
public Response handleCall(String volume, String bucket, Request request,
UriInfo uriInfo, HttpHeaders headers)
throws OzoneException {
// TODO : Add logging
String reqID = OzoneUtils.getRequestID();
String hostName = OzoneUtils.getHostName();
MDC.put(OZONE_COMPONENT, "ozone");
MDC.put(OZONE_REQUEST, reqID);
UserArgs userArgs = null;
try {
userArgs = new UserArgs(reqID, hostName, request, uriInfo, headers);
OzoneRestUtils.validate(request, headers, reqID, bucket, hostName);
OzoneUtils.verifyResourceName(bucket);
UserAuth auth = UserHandlerBuilder.getAuthHandler();
userArgs.setUserName(auth.getUser(userArgs));
MDC.put(OZONE_USER, userArgs.getUserName());
BucketArgs args = new BucketArgs(volume, bucket, userArgs);
MDC.put(OZONE_RESOURCE, args.getResourceName());
Response response = doProcess(args);
LOG.debug("Success");
MDC.clear();
return response;
} catch (IllegalArgumentException argEx) {
LOG.error("Invalid bucket.", argEx);
throw ErrorTable.newError(ErrorTable.INVALID_BUCKET_NAME, userArgs,
argEx);
} catch (IOException fsExp) {
handleIOException(bucket, reqID, hostName, fsExp);
}
return null;
}
/**
* Converts FileSystem IO exceptions to OZONE exceptions.
*
* @param bucket Name of the bucket
* @param reqID Request ID
* @param hostName Machine Name
* @param fsExp Exception
*
* @throws OzoneException
*/
void handleIOException(String bucket, String reqID, String hostName,
IOException fsExp) throws OzoneException {
LOG.error("IOException:", fsExp);
OzoneException exp = null;
if (fsExp instanceof FileAlreadyExistsException) {
exp = ErrorTable
.newError(ErrorTable.BUCKET_ALREADY_EXISTS, reqID, bucket, hostName);
}
if (fsExp instanceof DirectoryNotEmptyException) {
exp = ErrorTable
.newError(ErrorTable.BUCKET_NOT_EMPTY, reqID, bucket, hostName);
}
if (fsExp instanceof NoSuchFileException) {
exp = ErrorTable
.newError(ErrorTable.INVALID_BUCKET_NAME, reqID, bucket, hostName);
}
// Default we don't handle this exception yet,
// report a Server Internal Error.
if (exp == null) {
exp =
ErrorTable.newError(ErrorTable.SERVER_ERROR, reqID, bucket, hostName);
if (fsExp != null) {
exp.setMessage(fsExp.getMessage());
}
}
throw exp;
}
/**
* Abstract function that gets implemented in the BucketHandler functions.
* This function deals only with the core file-system-related logic
* and relies on the handleCall function for the repetitive error checks.
*
* @param args - parsed bucket args, name, userName, ACLs etc
*
* @return Response
*
* @throws OzoneException
* @throws IOException
*/
public abstract Response doProcess(BucketArgs args)
throws OzoneException, IOException;
/**
* Returns the ACL strings if available.
* This function ignores all ACLs that are not prefixed with either
* ADD or REMOVE.
*
* @param args - BucketArgs
* @param tag - Tag for different type of acls
*
* @return List of ACLs
*
*/
List<String> getAcls(BucketArgs args, String tag) {
List<String> aclStrings =
args.getHeaders().getRequestHeader(Header.OZONE_ACLS);
List<String> filteredSet = null;
if (aclStrings != null) {
filteredSet = new ArrayList<>();
for (String s : aclStrings) {
if (s.startsWith(tag)) {
filteredSet.add(s.replaceFirst(tag, ""));
}
}
}
return filteredSet;
}
/**
* Returns bucket versioning Info.
*
* @param args - BucketArgs
*
* @return - String
*
* @throws OzoneException
*/
OzoneConsts.Versioning getVersioning(BucketArgs args) throws OzoneException {
List<String> versionStrings =
args.getHeaders().getRequestHeader(Header.OZONE_BUCKET_VERSIONING);
if (versionStrings == null) {
return null;
}
if (versionStrings.size() > 1) {
OzoneException ex =
ErrorTable.newError(ErrorTable.MALFORMED_BUCKET_VERSION, args);
ex.setMessage("Exactly one bucket version header required");
throw ex;
}
String version = versionStrings.get(0);
try {
return OzoneConsts.Versioning.valueOf(version);
} catch (IllegalArgumentException ex) {
LOG.debug("Malformed Version. version: {}", version);
throw ErrorTable.newError(ErrorTable.MALFORMED_BUCKET_VERSION, args, ex);
}
}
/**
* Returns Storage Class if Available or returns Default.
*
* @param args - bucketArgs
*
* @return StorageType
*
* @throws OzoneException
*/
StorageType getStorageType(BucketArgs args) throws OzoneException {
List<String> storageClassString = null;
try {
storageClassString =
args.getHeaders().getRequestHeader(Header.OZONE_STORAGE_TYPE);
if (storageClassString == null) {
return null;
}
if (storageClassString.size() > 1) {
OzoneException ex =
ErrorTable.newError(ErrorTable.MALFORMED_STORAGE_TYPE, args);
ex.setMessage("Exactly one storage class header required");
throw ex;
}
return StorageType.valueOf(storageClassString.get(0).toUpperCase());
} catch (IllegalArgumentException ex) {
if(storageClassString != null) {
LOG.debug("Malformed storage type. Type: {}",
storageClassString.get(0).toUpperCase());
}
throw ErrorTable.newError(ErrorTable.MALFORMED_STORAGE_TYPE, args, ex);
}
}
/**
* Returns BucketInfo response.
*
* @param args - BucketArgs
*
* @return BucketInfo
*
* @throws IOException
* @throws OzoneException
*/
Response getBucketInfoResponse(BucketArgs args)
throws IOException, OzoneException {
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
BucketInfo info = fs.getBucketInfo(args);
return OzoneRestUtils.getResponse(args, HTTP_OK, info.toJsonString());
}
/**
* Returns list of objects in a bucket.
* @param args - ListArgs
* @return Response
* @throws IOException
* @throws OzoneException
*/
Response getBucketKeysList(ListArgs args) throws IOException, OzoneException {
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
ListKeys objects = fs.listKeys(args);
return OzoneRestUtils.getResponse(args.getArgs(), HTTP_OK,
objects.toJsonString());
}
}
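Every REST verb in BucketHandler above instantiates this template anonymously; reduced to its essentials, the pattern is:

// Sketch: an operation supplies only doProcess and inherits validation,
// MDC logging and error translation from handleCall.
return new BucketProcessTemplate() {
  @Override
  public Response doProcess(BucketArgs args)
      throws OzoneException, IOException {
    StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
    // ... operate on fs with args ...
    return OzoneRestUtils.getResponse(args, HTTP_OK, "");
  }
}.handleCall(volume, bucket, req, uriInfo, headers);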

View File

@ -1,302 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.handlers;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Request;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import org.apache.hadoop.ozone.OzoneRestUtils;
import org.apache.hadoop.ozone.client.io.LengthInputStream;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.web.interfaces.Keys;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.response.KeyInfo;
import static java.net.HttpURLConnection.HTTP_CREATED;
import static java.net.HttpURLConnection.HTTP_OK;
import org.apache.commons.codec.binary.Hex;
/**
* KeyHandler deals with basic Key Operations.
*/
public class KeyHandler implements Keys {
/**
* Gets the key, or the key information, if it exists.
*
* @param volume Storage Volume
* @param bucket Name of the bucket
* @param key Name of the key
* @param info Tag info
* @param req Request
* @param uriInfo Uri Info
* @param headers Http Header
* @return Response
* @throws OzoneException
*/
@Override
public Response getKey(String volume, String bucket, String key, String info,
Request req, UriInfo uriInfo, HttpHeaders headers)
throws OzoneException {
return new KeyProcessTemplate() {
/**
* Abstract function that gets implemented in the KeyHandler functions.
* This function will just deal with the core file system related logic
* and will rely on handleCall function for repetitive error checks
*
* @param args - parsed bucket args, name, userName, ACLs etc
* @param input - The body as an Input Stream
* @param request - Http request
* @param headers - Parsed http Headers.
* @param uriInfo - UriInfo
*
* @return Response
*
* @throws IOException - From the file system operations
*/
@Override
public Response doProcess(KeyArgs args, InputStream input,
Request request, HttpHeaders headers,
UriInfo uriInfo)
throws IOException, OzoneException, NoSuchAlgorithmException {
if (info == null) {
return getKey(args);
} else if (info.equals(Header.OZONE_INFO_QUERY_KEY)) {
return getKeyInfo(args);
} else if (info.equals(Header.OZONE_INFO_QUERY_KEY_DETAIL)) {
return getKeyInfoDetail(args);
}
OzoneException ozException = ErrorTable
.newError(ErrorTable.INVALID_QUERY_PARAM, args);
ozException.setMessage("Unrecognized query param : " + info);
throw ozException;
}
}.handleCall(volume, bucket, key, req, headers, uriInfo, null);
}
/**
* Gets the Key if it exists.
*/
private Response getKey(KeyArgs args)
throws IOException, OzoneException {
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
LengthInputStream stream = fs.newKeyReader(args);
return OzoneRestUtils.getResponse(args, HTTP_OK, stream);
}
/**
* Gets the Key information if it exists.
*/
private Response getKeyInfo(KeyArgs args)
throws IOException, OzoneException {
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
KeyInfo keyInfo = fs.getKeyInfo(args);
return OzoneRestUtils.getResponse(args, HTTP_OK, keyInfo.toJsonString());
}
/**
* Gets the Key detail information if it exists.
*/
private Response getKeyInfoDetail(KeyArgs args)
throws IOException, OzoneException {
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
KeyInfo keyInfo = fs.getKeyInfoDetails(args);
return OzoneRestUtils.getResponse(args, HTTP_OK, keyInfo.toJsonString());
}
/**
* Adds a key to an existing bucket. If the object already exists, this call
* overwrites it, or adds a new version of it if bucket versioning is
* turned on.
*
* @param volume Storage Volume Name
* @param bucket Name of the bucket
* @param keys Name of the Object
* @param is InputStream or File Data
* @param req Request
* @param info - UriInfo
* @param headers http headers
* @return Response
* @throws OzoneException
*/
@Override
public Response putKey(String volume, String bucket, String keys,
InputStream is, Request req, UriInfo info,
HttpHeaders headers) throws OzoneException {
return new KeyProcessTemplate() {
/**
* Abstract function that gets implemented in the KeyHandler functions.
* This function will just deal with the core file system related logic
* and will rely on handleCall function for repetitive error checks
*
* @param args - parsed bucket args, name, userName, ACLs etc
* @param input - The body as an Input Stream
* @param request - Http request
* @param headers - Parsed http Headers.
* @param info - UriInfo
*
* @return Response
*
* @throws IOException - From the file system operations
*/
@Override
public Response doProcess(KeyArgs args, InputStream input,
Request request, HttpHeaders headers,
UriInfo info)
throws IOException, OzoneException, NoSuchAlgorithmException {
final int eof = -1;
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
byte[] buffer = new byte[4 * 1024];
String contentLenString = getContentLength(headers, args);
String newLen = contentLenString.replaceAll("\"", "");
int contentLen = Integer.parseInt(newLen);
args.setSize(contentLen);
MessageDigest md5 = MessageDigest.getInstance("MD5");
int bytesRead = 0;
int len = 0;
OutputStream stream = fs.newKeyWriter(args);
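// Bounded copy: each read is capped at the bytes still owed under the
// declared Content-Length, and the MD5 digest is updated alongside the write.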
while ((bytesRead < contentLen) && (len != eof)) {
int readSize =
(contentLen - bytesRead > buffer.length) ? buffer.length :
contentLen - bytesRead;
len = input.read(buffer, 0, readSize);
if (len != eof) {
stream.write(buffer, 0, len);
md5.update(buffer, 0, len);
bytesRead += len;
}
}
checkFileLengthMatch(args, fs, contentLen, bytesRead);
String hashString = Hex.encodeHexString(md5.digest());
// TODO : Enable hash value checking.
// String contentHash = getContentMD5(headers, args);
// checkFileHashMatch(args, hashString, fs, contentHash);
args.setHash(hashString);
args.setSize(bytesRead);
fs.commitKey(args, stream);
return OzoneRestUtils.getResponse(args, HTTP_CREATED, "");
}
}.handleCall(volume, bucket, keys, req, headers, info, is);
}
/**
* Deletes an existing key.
*
* @param volume Storage Volume Name
* @param bucket Name of the bucket
* @param keys Name of the Object
* @param req http Request
* @param info - UriInfo
* @param headers HttpHeaders
* @return Response
* @throws OzoneException
*/
@Override
public Response deleteKey(String volume, String bucket, String keys,
Request req, UriInfo info, HttpHeaders headers)
throws OzoneException {
return new KeyProcessTemplate() {
/**
* Abstract function that gets implemented in the KeyHandler functions.
* This function will just deal with the core file system related logic
* and will rely on handleCall function for repetitive error checks
*
* @param args - parsed bucket args, name, userName, ACLs etc
* @param input - The body as an Input Stream
* @param request - Http request
* @param headers - Parsed http Headers.
* @param info - UriInfo
*
* @return Response
*
* @throws IOException - From the file system operations
*/
@Override
public Response doProcess(KeyArgs args, InputStream input,
Request request, HttpHeaders headers,
UriInfo info)
throws IOException, OzoneException, NoSuchAlgorithmException {
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
fs.deleteKey(args);
return OzoneRestUtils.getResponse(args, HTTP_OK, "");
}
}.handleCall(volume, bucket, keys, req, headers, info, null);
}
/**
* Renames an existing key within a bucket.
*
* @param volume Storage Volume Name
* @param bucket Name of the bucket
* @param key Name of the Object
* @param toKeyName New name of the Object
* @param req http Request
* @param info UriInfo
* @param headers HttpHeaders
* @return Response
* @throws OzoneException
*/
@Override
public Response renameKey(String volume, String bucket, String key,
String toKeyName, Request req, UriInfo info, HttpHeaders headers)
throws OzoneException {
return new KeyProcessTemplate() {
/**
* Abstract function that gets implemented in the KeyHandler functions.
* This function will just deal with the core file system related logic
* and will rely on handleCall function for repetitive error checks
*
* @param args - parsed bucket args, name, userName, ACLs etc
* @param input - The body as an Input Stream
* @param request - Http request
* @param headers - Parsed http Headers.
* @param info - UriInfo
*
* @return Response
*
* @throws IOException - From the file system operations
*/
@Override
public Response doProcess(KeyArgs args, InputStream input,
Request request, HttpHeaders headers,
UriInfo info)
throws IOException, OzoneException, NoSuchAlgorithmException {
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
fs.renameKey(args, toKeyName);
return OzoneRestUtils.getResponse(args, HTTP_OK, "");
}
}.handleCall(volume, bucket, key, req, headers, info, null);
}
}

View File

@ -1,235 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.handlers;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.ozone.OzoneRestUtils;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.interfaces.UserAuth;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Request;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.io.InputStream;
import java.security.NoSuchAlgorithmException;
import java.util.List;
import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.BAD_DIGEST;
import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.INCOMPLETE_BODY;
import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.INVALID_BUCKET_NAME;
import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.INVALID_REQUEST;
import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.SERVER_ERROR;
import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.newError;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_COMPONENT;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RESOURCE;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_REQUEST;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_USER;
/**
* This class abstracts away the repetitive tasks in Key handling code.
*/
public abstract class KeyProcessTemplate {
private static final Logger LOG =
LoggerFactory.getLogger(KeyProcessTemplate.class);
/**
* This function serves as the common error handling function for all Key
* related operations.
*
* @param volume volume name
* @param bucket bucket name
* @param key the object name
* @param request Http request
* @param headers Http headers
* @param info UriInfo
* @param is request body as an Input Stream
* @return Response
* @throws OzoneException
*/
public Response handleCall(String volume, String bucket, String key,
Request request, HttpHeaders headers, UriInfo info,
InputStream is) throws OzoneException {
String reqID = OzoneUtils.getRequestID();
String hostName = OzoneUtils.getHostName();
MDC.put(OZONE_COMPONENT, "ozone");
MDC.put(OZONE_REQUEST, reqID);
UserArgs userArgs = null;
try {
userArgs = new UserArgs(reqID, hostName, request, info, headers);
OzoneRestUtils.validate(request, headers, reqID, bucket, hostName);
OzoneUtils.verifyResourceName(bucket);
UserAuth auth = UserHandlerBuilder.getAuthHandler();
userArgs.setUserName(auth.getUser(userArgs));
MDC.put(OZONE_USER, userArgs.getUserName());
KeyArgs args = new KeyArgs(volume, bucket, key, userArgs);
MDC.put(OZONE_RESOURCE, args.getResourceName());
Response response = doProcess(args, is, request, headers, info);
LOG.debug("Success");
MDC.clear();
return response;
} catch (IllegalArgumentException argExp) {
LOG.error("Invalid bucket in key call.", argExp);
throw newError(INVALID_BUCKET_NAME, userArgs, argExp);
} catch (IOException fsExp) {
LOG.error("IOException:", fsExp);
// Map KEY_NOT_FOUND to INVALID_KEY
if (fsExp.getMessage() != null && fsExp.getMessage().endsWith(
OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND.name())) {
throw ErrorTable.newError(ErrorTable.INVALID_KEY, userArgs, fsExp);
}
// TODO : Handle errors from the FileSystem , let us map to server error
// for now.
throw ErrorTable.newError(ErrorTable.SERVER_ERROR, userArgs, fsExp);
} catch (NoSuchAlgorithmException algoEx) {
LOG.error("NoSuchAlgorithmException. Probably indicates an unusual java "
+ "installation.", algoEx);
throw ErrorTable.newError(SERVER_ERROR, userArgs, algoEx);
}
}
/**
* Abstract function that gets implemented in the KeyHandler functions. This
* function deals only with the core file-system logic and relies on the
* handleCall function for the repetitive error checks.
*
* @param args - parsed bucket args, name, userName, ACLs etc
* @param input - The body as an Input Stream
* @param request - Http request
* @param headers - Parsed http Headers.
* @param info - UriInfo
* @return Response
* @throws IOException - From the file system operations
*/
public abstract Response doProcess(KeyArgs args, InputStream input,
Request request, HttpHeaders headers,
UriInfo info)
throws IOException, OzoneException, NoSuchAlgorithmException;
/**
* Checks if the file Content-MD5 we wrote matches the hash we computed from
* the stream. If they do not match, we delete the file and throw an
* exception to let the user know about the hash mismatch.
*
* @param args Object Args
* @param computedString MD5 hash value
* @param fs Pointer to File System so we can delete the file
* @param contentHash User Specified hash string
* @throws IOException
* @throws OzoneException
*/
public void checkFileHashMatch(KeyArgs args, String computedString,
StorageHandler fs, String contentHash)
throws IOException, OzoneException {
if (contentHash != null) {
String contentString =
new String(Base64.decodeBase64(contentHash), OzoneUtils.ENCODING)
.trim();
if (!contentString.equals(computedString)) {
fs.deleteKey(args);
OzoneException ex = ErrorTable.newError(BAD_DIGEST, args);
String msg = String.format("MD5 Digest mismatch. Expected %s Found " +
"%s", contentString, computedString);
ex.setMessage(msg);
LOG.debug(msg);
throw ex;
}
}
}
/**
* Checks if the Content-Length matches the actual stream length. If we find
* a mismatch, we delete the file and throw an exception to let the user
* know that a length mismatch was detected.
*
* @param args Object Args
* @param fs Pointer to File System Object, to delete the file that we
* wrote
* @param contentLen Http Content-Length Header
* @param bytesRead Actual Bytes we read from the stream
* @throws IOException
* @throws OzoneException
*/
public void checkFileLengthMatch(KeyArgs args, StorageHandler fs,
int contentLen, int bytesRead)
throws IOException, OzoneException {
if (bytesRead != contentLen) {
fs.deleteKey(args);
OzoneException ex = ErrorTable.newError(INCOMPLETE_BODY, args);
String msg = String.format("Body length mismatch. Expected length : %d" +
" Found %d", contentLen, bytesRead);
ex.setMessage(msg);
LOG.debug(msg);
throw ex;
}
}
/**
* Returns the Content-Length header value, or throws if the header is
* missing, since it is required for putting a key.
*
* @param headers - Http Headers
* @return - Content-Length value as a String
*/
public String getContentLength(HttpHeaders headers, KeyArgs args)
throws OzoneException {
List<String> contentLengthList =
headers.getRequestHeader(HttpHeaders.CONTENT_LENGTH);
if ((contentLengthList != null) && (contentLengthList.size() > 0)) {
return contentLengthList.get(0);
}
OzoneException ex = ErrorTable.newError(INVALID_REQUEST, args);
ex.setMessage("Content-Length is a required header for putting a key.");
throw ex;
}
/**
* Returns the Content-MD5 header value if available.
*
* @param headers - Http Headers
* @return - Content-MD5 value, or an empty String when the header is absent
*/
public String getContentMD5(HttpHeaders headers, KeyArgs args) {
List<String> contentMd5List =
headers.getRequestHeader(Header.CONTENT_MD5);
if ((contentMd5List != null) && (contentMd5List.size() > 0)) {
return contentMd5List.get(0);
}
}
// TODO : Should we make this compulsory ?
// OzoneException ex = ErrorTable.newError(ErrorTable.invalidRequest, args);
// ex.setMessage("Content-MD5 is a required header for putting a key");
// throw ex;
return "";
}
}
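
checkFileHashMatch above compares a client-supplied Content-MD5 header
against a digest computed from the uploaded stream and deletes the key on a
mismatch. A small sketch of the same check assuming standard Content-MD5
semantics (Base64 of the raw 16-byte digest); the deleted handler instead
compared Base64-decoded strings, so treat the encoding here as an assumption:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Base64;

public final class Md5CheckSketch {

  // True when the Base64-encoded Content-MD5 header matches the digest
  // computed over the uploaded bytes.
  static boolean digestMatches(byte[] body, String contentMd5Header)
      throws Exception {
    byte[] computed = MessageDigest.getInstance("MD5").digest(body);
    byte[] claimed = Base64.getDecoder().decode(contentMd5Header);
    return MessageDigest.isEqual(computed, claimed);  // constant-time compare
  }

  public static void main(String[] args) throws Exception {
    byte[] body = "hello".getBytes(StandardCharsets.UTF_8);
    String header = Base64.getEncoder().encodeToString(
        MessageDigest.getInstance("MD5").digest(body));
    System.out.println(digestMatches(body, header));  // prints true
  }
}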

View File

@ -1,61 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.handlers;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import javax.ws.rs.core.UriBuilder;
import javax.ws.rs.ext.Provider;
/**
* This class is used to intercept root URL requests and route them to
* the Volume List functionality.
*/
@Provider
public class ServiceFilter implements ContainerRequestFilter {
/**
* Filter the request.
* <p>
* An implementation may modify the state of the request or
* create a new instance.
*
* @param request the request.
*
* @return the request.
*/
@Override
public ContainerRequest filter(ContainerRequest request) {
if (request.getRequestUri().getPath().length() > 1) {
return request;
}
// Just re-route it to volume handler with some hypothetical volume name.
// volume name is ignored.
request.setUris(request.getBaseUri(),
UriBuilder.fromUri(request.getRequestUri())
.path("/service")
.queryParam("info", Header.OZONE_LIST_QUERY_SERVICE)
.build());
return request;
}
}
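
ServiceFilter above rewrites a bare root URL into the /service volume-list
query before routing. A sketch of the same rewrite against the JAX-RS 2.x
filter API rather than the Jersey 1.x one the deleted class used; the
"service" query value stands in for Header.OZONE_LIST_QUERY_SERVICE:

import java.net.URI;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerRequestFilter;
import javax.ws.rs.container.PreMatching;
import javax.ws.rs.core.UriBuilder;
import javax.ws.rs.ext.Provider;

@Provider
@PreMatching // URI rewriting is only allowed before resource matching
public class RootRewriteFilter implements ContainerRequestFilter {
  @Override
  public void filter(ContainerRequestContext ctx) {
    URI request = ctx.getUriInfo().getRequestUri();
    if (request.getPath().length() > 1) {
      return; // only the bare root URL is re-routed
    }
    ctx.setRequestUri(UriBuilder.fromUri(request)
        .path("/service")
        .queryParam("info", "service")
        .build());
  }
}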

View File

@ -1,80 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.handlers;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
/**
* This class is responsible for providing a {@link StorageHandler}
* implementation to object store web handlers.
*/
@InterfaceAudience.Private
public final class StorageHandlerBuilder {
private static final Logger LOG =
LoggerFactory.getLogger(StorageHandlerBuilder.class);
private static final ThreadLocal<StorageHandler>
STORAGE_HANDLER_THREAD_LOCAL = new ThreadLocal<>();
/**
* Returns the configured StorageHandler from thread-local storage for this
* thread.
*
* @return StorageHandler from thread-local storage
*/
public static StorageHandler getStorageHandler() throws IOException {
StorageHandler storageHandler = STORAGE_HANDLER_THREAD_LOCAL.get();
if (storageHandler != null) {
return storageHandler;
} else {
LOG.error("No Storage Handler Configured.");
throw new IOException("Invalid Handler Configuration");
}
}
/**
* Removes the configured StorageHandler from thread-local storage for this
* thread.
*/
public static void removeStorageHandler() {
STORAGE_HANDLER_THREAD_LOCAL.remove();
}
/**
* Sets the configured StorageHandler in thread-local storage for this thread.
*
* @param storageHandler StorageHandler to set in thread-local storage
*/
public static void setStorageHandler(StorageHandler storageHandler) {
STORAGE_HANDLER_THREAD_LOCAL.set(storageHandler);
}
/**
* There is no reason to instantiate this class.
*/
private StorageHandlerBuilder() {
}
}
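
StorageHandlerBuilder above, like UserHandlerBuilder below, keeps per-request
context in a ThreadLocal. A minimal usage sketch of that holder pattern with
a hypothetical dispatcher: bind the context for the current thread, serve the
request, and always clear it in finally so pooled threads do not leak state:

public final class HolderUsageSketch {
  private static final ThreadLocal<String> HANDLER = new ThreadLocal<>();

  static void dispatch(String handler, Runnable request) {
    HANDLER.set(handler);   // bind context to this thread
    try {
      request.run();        // downstream code reads HANDLER.get()
    } finally {
      HANDLER.remove();     // mandatory when threads are pooled
    }
  }

  public static void main(String[] args) {
    dispatch("distributed", () -> System.out.println(HANDLER.get()));
  }
}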

View File

@ -1,75 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.handlers;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ozone.web.interfaces.UserAuth;
import org.apache.hadoop.ozone.web.userauth.Simple;
/**
* This class is responsible for providing a
* {@link org.apache.hadoop.ozone.web.interfaces.UserAuth}
* implementation to object store web handlers.
*/
@InterfaceAudience.Private
public final class UserHandlerBuilder {
private static final ThreadLocal<UserAuth> USER_AUTH_THREAD_LOCAL =
new ThreadLocal<UserAuth>();
/**
* Returns the configured UserAuth from thread-local storage for this
* thread.
*
* @return UserAuth from thread-local storage
*/
public static UserAuth getAuthHandler() {
UserAuth authHandler = USER_AUTH_THREAD_LOCAL.get();
if (authHandler != null) {
return authHandler;
} else {
// This only happens while using mvn jetty:run for testing.
return new Simple();
}
}
/**
* Removes the configured UserAuth from thread-local storage for this
* thread.
*/
public static void removeAuthHandler() {
USER_AUTH_THREAD_LOCAL.remove();
}
/**
* Sets the configured UserAuthHandler in thread-local storage for this
* thread.
*
* @param authHandler authHandler to set in thread-local storage
*/
public static void setAuthHandler(UserAuth authHandler) {
USER_AUTH_THREAD_LOCAL.set(authHandler);
}
/**
* There is no reason to instantiate this class.
*/
private UserHandlerBuilder() {
}
}

View File

@ -1,274 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.handlers;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Request;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ozone.OzoneRestUtils;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.interfaces.UserAuth;
import org.apache.hadoop.ozone.web.interfaces.Volume;
import static java.net.HttpURLConnection.HTTP_CREATED;
import static java.net.HttpURLConnection.HTTP_OK;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_FUNCTION;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
/**
* VolumeHandler handles volume specific HTTP calls.
*
* Most functions in this file follow a simple pattern:
* all calls are handled by VolumeProcessTemplate.handleCall, which
* calls back into the doProcess function.
*
* Everything common to volume handling is abstracted out into handleCall,
* for example checking that the volume name is sane, that we have a
* supported ozone version number, and a valid date. Actions specific to
* a call live inside the doProcess callback.
*/
@InterfaceAudience.Private
public class VolumeHandler implements Volume {
private static final Logger LOG = LoggerFactory.getLogger(VolumeHandler
.class);
/**
* Creates a volume.
*
* @param volume Volume Name, this has to be unique at Ozone cluster level
* @param quota Quota for this Storage Volume
* - {@literal <int>(<BYTES|MB|GB|TB>)}
* @param req Request Object
* @param uriInfo URI info
* @param headers Http Headers
*
* @return Standard JAX-RS Response
*
* @throws OzoneException
*/
@Override
public Response createVolume(String volume, final String quota, Request req,
UriInfo uriInfo, HttpHeaders headers)
throws OzoneException {
MDC.put(OZONE_FUNCTION, "createVolume");
return new VolumeProcessTemplate() {
@Override
public Response doProcess(VolumeArgs args)
throws IOException, OzoneException {
UserAuth auth = UserHandlerBuilder.getAuthHandler();
if (auth.isAdmin(args)) {
args.setAdminName(args.getUserName());
String volumeOwner = auth.getOzoneUser(args);
if (volumeOwner == null) {
throw ErrorTable.newError(ErrorTable.USER_NOT_FOUND, args);
}
if (!auth.isUser(volumeOwner, args)) {
throw ErrorTable.newError(ErrorTable.USER_NOT_FOUND, args);
}
args.setUserName(volumeOwner);
args.setGroups(auth.getGroups(args));
if (!quota.equals(Header.OZONE_QUOTA_UNDEFINED)) {
setQuotaArgs(args, quota);
}
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
fs.createVolume(args);
return OzoneRestUtils.getResponse(args, HTTP_CREATED, "");
} else {
throw ErrorTable.newError(ErrorTable.ACCESS_DENIED, args);
}
}
}.handleCall(volume, req, uriInfo, headers);
}
/**
* Updates volume metadata.
*
* Only two actions are currently possible with updateVolume:
* changing the volume ownership or updating the quota. If you make a call
* with neither of these actions, update just returns 200 OK.
*
* @param volume Volume Name, this has to be unique at Ozone Level
* @param quota Quota for this volume
* - {@literal <int>(<BYTES|MB|GB|TB>)}|remove
* @param req - Request Object
* @param uriInfo - URI info
* @param headers Http Headers
*
* @return Standard JAX-RS Response
*
* @throws OzoneException
*/
@Override
public Response updateVolume(String volume, final String quota, Request req,
UriInfo uriInfo, HttpHeaders headers)
throws OzoneException {
MDC.put(OZONE_FUNCTION, "updateVolume");
return new VolumeProcessTemplate() {
@Override
public Response doProcess(VolumeArgs args)
throws IOException, OzoneException {
UserAuth auth = UserHandlerBuilder.getAuthHandler();
if (auth.isAdmin(args)) {
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
args.setAdminName(args.getUserName());
String newVolumeOwner = auth.getOzoneUser(args);
if (newVolumeOwner != null) {
if (!auth.isUser(newVolumeOwner, args)) {
throw ErrorTable.newError(ErrorTable.USER_NOT_FOUND, args);
}
args.setUserName(newVolumeOwner);
fs.setVolumeOwner(args);
}
if (!quota.equals(Header.OZONE_QUOTA_UNDEFINED)) {
if (quota.equals(Header.OZONE_QUOTA_REMOVE)) {
// if it is remove, just tell the file system to remove quota
fs.setVolumeQuota(args, true);
} else {
setQuotaArgs(args, quota);
fs.setVolumeQuota(args, false);
}
}
return OzoneRestUtils.getResponse(args, HTTP_OK, "");
} else {
// Only Admins are allowed to update volumes
throw ErrorTable.newError(ErrorTable.ACCESS_DENIED, args);
}
}
}.handleCall(volume, req, uriInfo, headers);
}
/**
* Deletes a volume if it is empty.
*
* @param volume Volume Name
* @param req - Http Request
* @param uriInfo - http URI
* @param headers - http headers
*
* @return Standard JAX-RS Response
*
* @throws OzoneException
*/
@Override
public Response deleteVolume(String volume, Request req, UriInfo uriInfo,
HttpHeaders headers) throws OzoneException {
MDC.put(OZONE_FUNCTION, "deleteVolume");
return new VolumeProcessTemplate() {
@Override
public Response doProcess(VolumeArgs args)
throws IOException, OzoneException {
UserAuth auth = UserHandlerBuilder.getAuthHandler();
if (auth.isAdmin(args)) {
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
fs.deleteVolume(args);
return OzoneRestUtils.getResponse(args, HTTP_OK, "");
} else {
throw ErrorTable.newError(ErrorTable.ACCESS_DENIED, args);
}
}
}.handleCall(volume, req, uriInfo, headers);
}
/**
* Returns Volume info. This API can be invoked either by admin or the owner.
*
* @param volume - Storage Volume Name
* @param info - Info attribute
* @param prefix - Prefix key
* @param maxKeys - Max results
* @param prevKey - PrevKey
* @param rootScan - When true, lists volumes of all users (admin only)
* @param req - Http Req
* @param uriInfo - UriInfo.
* @param headers - Http headers
* @return Standard JAX-RS Response
* @throws OzoneException
*/
@Override
public Response getVolumeInfo(String volume, final String info,
final String prefix,
final int maxKeys,
final String prevKey,
final boolean rootScan,
Request req,
final UriInfo uriInfo, HttpHeaders headers)
throws OzoneException {
return new VolumeProcessTemplate() {
@Override
public Response doProcess(VolumeArgs args)
throws IOException, OzoneException {
switch (info) {
case Header.OZONE_INFO_QUERY_BUCKET:
MDC.put(OZONE_FUNCTION, "ListBucket");
return getBucketsInVolume(args, prefix, maxKeys, prevKey);
case Header.OZONE_INFO_QUERY_VOLUME:
MDC.put(OZONE_FUNCTION, "InfoVolume");
assertNoListParamPresent(uriInfo, args);
return getVolumeInfoResponse(args); // Return volume info
case Header.OZONE_LIST_QUERY_SERVICE:
MDC.put(OZONE_FUNCTION, "ListVolume");
return getVolumesByUser(args, prefix, maxKeys, prevKey, rootScan);
default:
LOG.debug("Unrecognized query param : {} ", info);
OzoneException ozoneException =
ErrorTable.newError(ErrorTable.INVALID_QUERY_PARAM, args);
ozoneException.setMessage("Unrecognized query param : " + info);
throw ozoneException;
}
}
}.handleCall(volume, req, uriInfo, headers);
}
/**
* Asserts no list query param is present during this call.
*
* @param uriInfo - UriInfo
* @param args - VolumeArgs
* @throws OzoneException
*/
private void assertNoListParamPresent(final UriInfo uriInfo, VolumeArgs
args) throws
OzoneException {
String prefix = uriInfo.getQueryParameters().getFirst("prefix");
String maxKeys = uriInfo.getQueryParameters().getFirst("max_keys");
String prevKey = uriInfo.getQueryParameters().getFirst("prev_key");
if ((prefix != null && !prefix.equals(Header.OZONE_EMPTY_STRING)) ||
(maxKeys != null && !maxKeys.equals(Header.OZONE_DEFAULT_LIST_SIZE)) ||
(prevKey != null && !prevKey.equals(Header.OZONE_EMPTY_STRING))) {
throw ErrorTable.newError(ErrorTable.INVALID_QUERY_PARAM, args);
}
}
}
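
createVolume and updateVolume above accept quota strings in the
<int><BYTES|MB|GB|TB> shape named in their Javadoc. A sketch of parsing such
strings into a byte count; the regex and binary unit multipliers are
assumptions of this sketch, not Ozone's actual quota parser:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class QuotaParseSketch {
  private static final Pattern QUOTA =
      Pattern.compile("(\\d+)(BYTES|MB|GB|TB)");

  static long toBytes(String quota) {
    Matcher m = QUOTA.matcher(quota.trim().toUpperCase());
    if (!m.matches()) {
      throw new IllegalArgumentException("Malformed quota: " + quota);
    }
    long count = Long.parseLong(m.group(1));
    switch (m.group(2)) {
    case "BYTES": return count;
    case "MB":    return count * 1024L * 1024L;
    case "GB":    return count * 1024L * 1024L * 1024L;
    default:      return count * 1024L * 1024L * 1024L * 1024L; // TB
    }
  }

  public static void main(String[] args) {
    System.out.println(toBytes("100GB")); // prints 107374182400
  }
}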

View File

@ -1,276 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.handlers;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Request;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.NoSuchFileException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ozone.OzoneRestUtils;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.interfaces.UserAuth;
import org.apache.hadoop.ozone.web.response.ListBuckets;
import org.apache.hadoop.ozone.web.response.ListVolumes;
import org.apache.hadoop.ozone.web.response.VolumeInfo;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import static java.net.HttpURLConnection.HTTP_OK;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_COMPONENT;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_REQUEST;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RESOURCE;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_USER;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
/**
* This class abstracts away the repetitive tasks in
* handling volume related code.
*/
@InterfaceAudience.Private
public abstract class VolumeProcessTemplate {
private static final Logger LOG =
LoggerFactory.getLogger(VolumeProcessTemplate.class);
/**
* The handle call is the common functionality for Volume
* handling code.
*
* @param volume - Name of the Volume
* @param request - request
* @param info - UriInfo
* @param headers - Http Headers
*
* @return Response
*
* @throws OzoneException
*/
public Response handleCall(String volume, Request request, UriInfo info,
HttpHeaders headers) throws OzoneException {
String reqID = OzoneUtils.getRequestID();
String hostName = OzoneUtils.getHostName();
MDC.put(OZONE_COMPONENT, "ozone");
MDC.put(OZONE_REQUEST, reqID);
UserArgs userArgs = null;
try {
userArgs = new UserArgs(reqID, hostName, request, info, headers);
OzoneRestUtils.validate(request, headers, reqID, volume, hostName);
// we use the same logic for both bucket and volume names
OzoneUtils.verifyResourceName(volume);
UserAuth auth = UserHandlerBuilder.getAuthHandler();
userArgs.setUserName(auth.getUser(userArgs));
MDC.put(OZONE_USER, userArgs.getUserName());
VolumeArgs args = new VolumeArgs(volume, userArgs);
MDC.put(OZONE_RESOURCE, args.getResourceName());
Response response = doProcess(args);
LOG.info("Success");
MDC.clear();
return response;
} catch (IllegalArgumentException ex) {
LOG.error("Illegal argument.", ex);
throw ErrorTable.newError(ErrorTable.INVALID_VOLUME_NAME, userArgs, ex);
} catch (IOException ex) {
handleIOException(volume, reqID, hostName, ex);
}
return null;
}
/**
* Specific handler for each call.
*
* @param args - Volume Args
*
* @return - Response
*
* @throws IOException
* @throws OzoneException
*/
public abstract Response doProcess(VolumeArgs args)
throws IOException, OzoneException;
/**
* Maps Java File System Exceptions to Ozone Exceptions in the Volume path.
*
* @param volume - Name of the Volume
* @param reqID - Request ID
* @param hostName - HostName
* @param fsExp - Exception
*
* @throws OzoneException
*/
private void handleIOException(String volume, String reqID, String hostName,
IOException fsExp) throws OzoneException {
LOG.error("IOException:", fsExp);
OzoneException exp = null;
if ((fsExp != null && fsExp.getMessage() != null
&& fsExp.getMessage().endsWith(
OzoneManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS.name()))
|| fsExp instanceof FileAlreadyExistsException) {
exp = ErrorTable
.newError(ErrorTable.VOLUME_ALREADY_EXISTS, reqID, volume, hostName);
}
if (fsExp instanceof DirectoryNotEmptyException) {
exp = ErrorTable
.newError(ErrorTable.VOLUME_NOT_EMPTY, reqID, volume, hostName);
}
if (fsExp instanceof NoSuchFileException) {
exp = ErrorTable
.newError(ErrorTable.INVALID_VOLUME_NAME, reqID, volume, hostName);
}
if ((fsExp != null) && (exp != null)) {
exp.setMessage(fsExp.getMessage());
}
// We don't handle that FS error yet, report a Server Internal Error
if (exp == null) {
exp =
ErrorTable.newError(ErrorTable.SERVER_ERROR, reqID, volume, hostName);
if (fsExp != null) {
exp.setMessage(fsExp.getMessage());
}
}
throw exp;
}
/**
* Sets the user-provided quota string into args and throws an ozone
* exception if the quota is malformed.
*
* @param args - volume args
* @param quota - quota string
*
* @throws OzoneException
*/
void setQuotaArgs(VolumeArgs args, String quota) throws OzoneException {
try {
args.setQuota(quota);
} catch (IllegalArgumentException ex) {
LOG.debug("Malformed Quota.", ex);
throw ErrorTable.newError(ErrorTable.MALFORMED_QUOTA, args, ex);
}
}
/**
* Fetches the volume info and wraps it into a JSON response.
*
* @param args - volumeArgs
*
* @return - VolumeInfo
*
* @throws IOException
* @throws OzoneException
*/
Response getVolumeInfoResponse(VolumeArgs args)
throws IOException, OzoneException {
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
VolumeInfo info = fs.getVolumeInfo(args);
return OzoneRestUtils.getResponse(args, HTTP_OK, info.toJsonString());
}
/**
* Returns all the volumes belonging to a user.
*
* @param user - userArgs
* @return - Response
* @throws OzoneException
* @throws IOException
*/
Response getVolumesByUser(UserArgs user, String prefix, int maxKeys,
String prevKey, boolean rootScan) throws OzoneException, IOException {
String validatedUser = user.getUserName();
try {
UserAuth auth = UserHandlerBuilder.getAuthHandler();
if (rootScan && !auth.isAdmin(user)) {
throw ErrorTable.newError(ErrorTable.UNAUTHORIZED, user);
}
if (auth.isAdmin(user)) {
validatedUser = auth.getOzoneUser(user);
if (validatedUser == null) {
validatedUser = auth.getUser(user);
}
}
UserArgs onBehalfOf =
new UserArgs(validatedUser, user.getRequestID(), user.getHostName(),
user.getRequest(), user.getUri(), user.getHeaders());
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
ListArgs<UserArgs> listArgs = new ListArgs<>(onBehalfOf, prefix,
maxKeys, prevKey);
listArgs.setRootScan(rootScan);
ListVolumes volumes = fs.listVolumes(listArgs);
return OzoneRestUtils.getResponse(user, HTTP_OK, volumes.toJsonString());
} catch (IOException ex) {
LOG.debug("unable to get the volume list for the user.", ex);
OzoneException exp = ErrorTable.newError(ErrorTable.SERVER_ERROR,
user, ex);
exp.setMessage("unable to get the volume list for the user");
throw exp;
}
}
/**
* Returns a list of Buckets in a Volume.
*
* @param args - VolumeArgs
* @param prefix - Prefix to Match
* @param maxKeys - Max results to return.
* @param prevKey - PrevKey
* @return List of Buckets
* @throws OzoneException
*/
Response getBucketsInVolume(VolumeArgs args, String prefix, int maxKeys,
String prevKey) throws OzoneException {
try {
// UserAuth auth = UserHandlerBuilder.getAuthHandler();
// TODO : Check ACLS.
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
ListArgs<VolumeArgs> listArgs = new ListArgs<>(args, prefix,
maxKeys, prevKey);
ListBuckets bucketList = fs.listBuckets(listArgs);
return OzoneRestUtils
.getResponse(args, HTTP_OK, bucketList.toJsonString());
} catch (IOException ex) {
LOG.debug("unable to get the bucket list for the specified volume.", ex);
OzoneException exp =
ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex);
exp.setMessage("unable to get the bucket list for the specified volume.");
throw exp;
}
}
}
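
handleIOException above maps NIO file-system exceptions onto protocol errors
and falls back to a generic server error. The same idiom in isolation, with
illustrative error codes standing in for the ErrorTable entries:

import java.io.IOException;
import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.NoSuchFileException;

public final class ErrorMapSketch {
  static String mapToError(IOException e) {
    if (e instanceof FileAlreadyExistsException) {
      return "VOLUME_ALREADY_EXISTS";
    }
    if (e instanceof DirectoryNotEmptyException) {
      return "VOLUME_NOT_EMPTY";
    }
    if (e instanceof NoSuchFileException) {
      return "INVALID_VOLUME_NAME";
    }
    return "SERVER_ERROR"; // unrecognized failures become a generic error
  }

  public static void main(String[] args) {
    System.out.println(mapToError(new NoSuchFileException("/vol")));
  }
}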

View File

@ -1,22 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.handlers;
/**
This package contains the Ozone web request handler classes.
*/

Some files were not shown because too many files have changed in this diff