From 77d4b18700cb815610f7775c13145613f7501243 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Mon, 18 Sep 2017 15:16:03 -0700 Subject: [PATCH] HDFS-12385. Ozone: OzoneClient: Refactoring OzoneClient API. Contributed by Nadakumar. --- .../apache/hadoop/ozone/OzoneConfigKeys.java | 10 + .../hadoop/ozone/client/BucketArgs.java | 123 +++++ .../hadoop/ozone/client/ObjectStore.java | 91 ++++ .../hadoop/ozone/client/OzoneBucket.java | 172 +++++- .../hadoop/ozone/client/OzoneClient.java | 482 +++-------------- .../ozone/client/OzoneClientFactory.java | 118 +++- .../client/OzoneClientInvocationHandler.java | 62 +++ .../hadoop/ozone/client/OzoneClientUtils.java | 27 +- .../apache/hadoop/ozone/client/OzoneKey.java | 52 +- .../hadoop/ozone/client/OzoneQuota.java | 198 +++++++ .../hadoop/ozone/client/OzoneVolume.java | 179 ++++-- .../hadoop/ozone/client/VolumeArgs.java | 128 +++++ .../ozone/client/protocol/ClientProtocol.java | 296 ++++++++++ .../ozone/client/protocol/package-info.java | 23 + .../ozone/client/rest/OzoneRestClient.java | 510 ------------------ .../hadoop/ozone/client/rest/RestClient.java | 209 +++++++ .../ozone/client/rest/headers/Header.java | 16 +- .../client/rest/headers/package-info.java | 18 +- .../{OzoneRpcClient.java => RpcClient.java} | 374 +++++-------- .../org/apache/hadoop/ozone/tools/Corona.java | 52 +- .../src/main/resources/ozone-default.xml | 11 + .../ozone/client/rpc/TestOzoneRpcClient.java | 259 +++++---- 22 files changed, 1960 insertions(+), 1450 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneQuota.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneRestClient.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java rename hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rpc/{OzoneRpcClient.java => RpcClient.java} (60%) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index 272edaff1f3..efa39a5a5e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -20,6 +20,9 @@ package org.apache.hadoop.ozone; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.apache.hadoop.ozone.client.rest.RestClient; +import 
org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.scm.ScmConfigKeys; /** @@ -103,6 +106,13 @@ public final class OzoneConfigKeys { public static final String OZONE_ADMINISTRATORS = "ozone.administrators"; + public static final String OZONE_CLIENT_PROTOCOL = + "ozone.client.protocol"; + public static final Class + OZONE_CLIENT_PROTOCOL_RPC = RpcClient.class; + public static final Class + OZONE_CLIENT_PROTOCOL_REST = RestClient.class; + public static final String OZONE_CLIENT_SOCKET_TIMEOUT_MS = "ozone.client.socket.timeout.ms"; public static final int OZONE_CLIENT_SOCKET_TIMEOUT_MS_DEFAULT = 5000; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java new file mode 100644 index 00000000000..5d07df29bf5 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java @@ -0,0 +1,123 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.client; + +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.ozone.OzoneAcl; + +import java.util.List; + +/** + * This class encapsulates the arguments that are + * required for creating a bucket. + */ +public final class BucketArgs { + + /** + * ACL Information. + */ + private List acls; + /** + * Bucket Version flag. + */ + private Boolean isVersionEnabled; + /** + * Type of storage to be used for this bucket. + * [RAM_DISK, SSD, DISK, ARCHIVE] + */ + private StorageType storageType; + + /** + * Private constructor, constructed via builder. + * @param isVersionEnabled Bucket version flag. + * @param storageType Storage type to be used. + * @param acls list of ACLs. + */ + private BucketArgs(Boolean isVersionEnabled, StorageType storageType, + List acls) { + this.acls = acls; + this.isVersionEnabled = isVersionEnabled; + this.storageType = storageType; + } + + /** + * Returns true if bucket version is enabled, else false. + * @return isVersionEnabled + */ + public Boolean isVersionEnabled() { + return isVersionEnabled; + } + + /** + * Returns the type of storage to be used. + * @return StorageType + */ + public StorageType getStorageType() { + return storageType; + } + + /** + * Returns the ACL's associated with this bucket. + * @return List + */ + public List getAcls() { + return acls; + } + + /** + * Returns new builder class that builds a KsmBucketInfo. + * + * @return Builder + */ + public static BucketArgs.Builder newBuilder() { + return new BucketArgs.Builder(); + } + + /** + * Builder for KsmBucketInfo. 
+ */ + public static class Builder { + private Boolean isVersionEnabled; + private StorageType storageType; + private List acls; + + public BucketArgs.Builder setIsVersionEnabled(Boolean versionFlag) { + this.isVersionEnabled = versionFlag; + return this; + } + + public BucketArgs.Builder setStorageType(StorageType storage) { + this.storageType = storage; + return this; + } + + public BucketArgs.Builder setAcls(List listOfAcls) { + this.acls = listOfAcls; + return this; + } + + /** + * Constructs the BucketArgs. + * @return instance of BucketArgs. + */ + public BucketArgs build() { + return new BucketArgs(isVersionEnabled, storageType, acls); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java new file mode 100644 index 00000000000..f8bb21a311a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.client; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; + +import java.io.IOException; + +/** + * ObjectStore class is responsible for the client operations that can be + * performed on Ozone Object Store. + */ +public class ObjectStore { + + /** + * The proxy used for connecting to the cluster and perform + * client operations. + */ + private final ClientProtocol proxy; + + /** + * Creates an instance of ObjectStore with the proxy. + * @param proxy ClientProtocol proxy + */ + public ObjectStore(ClientProtocol proxy) { + this.proxy = proxy; + } + + /** + * Creates the volume with default values. + * @param volumeName Name of the volume to be created. + * @throws IOException + */ + public void createVolume(String volumeName) throws IOException { + Preconditions.checkNotNull(volumeName); + proxy.createVolume(volumeName); + } + + /** + * Creates the volume. + * @param volumeName Name of the volume to be created. + * @param volumeArgs Volume properties. + * @throws IOException + */ + public void createVolume(String volumeName, VolumeArgs volumeArgs) + throws IOException { + Preconditions.checkNotNull(volumeName); + Preconditions.checkNotNull(volumeArgs); + proxy.createVolume(volumeName, volumeArgs); + } + + /** + * Returns the volume information. + * @param volumeName Name of the volume. 
+ * @return OzoneVolume + * @throws IOException + */ + public OzoneVolume getVolume(String volumeName) throws IOException { + Preconditions.checkNotNull(volumeName); + OzoneVolume volume = proxy.getVolumeDetails(volumeName); + volume.setClientProxy(proxy); + return volume; + } + + /** + * Deletes the volume. + * @param volumeName Name of the volume. + * @throws IOException + */ + public void deleteVolume(String volumeName) throws IOException { + Preconditions.checkNotNull(volumeName); + proxy.deleteVolume(volumeName); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index bfd571453c3..6d53c5bf2c1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -1,29 +1,32 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.ozone.client; +import com.google.common.base.Preconditions; import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo; +import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConsts.Versioning; +import java.io.IOException; import java.util.List; /** @@ -38,36 +41,53 @@ public class OzoneBucket { /** * Name of the bucket. */ - private final String bucketName; + private final String name; /** * Bucket ACLs. */ - private final List acls; + private List acls; /** * Type of storage to be used for this bucket. * [RAM_DISK, SSD, DISK, ARCHIVE] */ - private final StorageType storageType; + private StorageType storageType; /** * Bucket Version flag. */ - private final Versioning versioning; - + private Boolean versioning; /** - * Constructs OzoneBucket from KsmBucketInfo. - * - * @param ksmBucketInfo + * The proxy used for connecting to the cluster and perform + * client operations. 
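
A minimal usage sketch of the ObjectStore volume operations above, assuming a reachable cluster; the volume name is illustrative and error handling is elided.

// Illustrative sketch, not part of the patch: ObjectStore volume lifecycle.
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneVolume;

import java.io.IOException;

public class VolumeLifecycleSketch {
  public static void main(String[] args) throws IOException {
    OzoneClient client = OzoneClientFactory.getClient();
    try {
      ObjectStore store = client.getObjectStore();
      store.createVolume("vol-example");                 // created with default owner/quota
      OzoneVolume volume = store.getVolume("vol-example");
      System.out.println("volume owner: " + volume.getOwner());
      store.deleteVolume("vol-example");                 // volume must be empty
    } finally {
      client.close();
    }
  }
}
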
*/ - public OzoneBucket(KsmBucketInfo ksmBucketInfo) { - this.volumeName = ksmBucketInfo.getVolumeName(); - this.bucketName = ksmBucketInfo.getBucketName(); - this.acls = ksmBucketInfo.getAcls(); - this.storageType = ksmBucketInfo.getStorageType(); - this.versioning = ksmBucketInfo.getIsVersionEnabled() ? - Versioning.ENABLED : Versioning.DISABLED; + private ClientProtocol proxy; + + /** + * Constructs OzoneBucket instance. + * @param volumeName Name of the volume the bucket belongs to. + * @param bucketName Name of the bucket. + * @param acls ACLs associated with the bucket. + * @param storageType StorageType of the bucket. + * @param versioning versioning status of the bucket. + */ + public OzoneBucket(String volumeName, String bucketName, + List acls, StorageType storageType, + Boolean versioning) { + this.volumeName = volumeName; + this.name = bucketName; + this.acls = acls; + this.storageType = storageType; + this.versioning = versioning; + } + + /** + * Sets the proxy using which client operations are performed. + * @param clientProxy + */ + public void setClientProxy(ClientProtocol clientProxy) { + this.proxy = clientProxy; } /** @@ -84,8 +104,8 @@ public class OzoneBucket { * * @return bucketName */ - public String getBucketName() { - return bucketName; + public String getName() { + return name; } /** @@ -111,8 +131,104 @@ public class OzoneBucket { * * @return versioning */ - public Versioning getVersioning() { + public Boolean getVersioning() { return versioning; } + /** + * Adds ACLs to the Bucket. + * @param addAcls ACLs to be added + * @throws IOException + */ + public void addAcls(List addAcls) throws IOException { + Preconditions.checkNotNull(proxy, "Client proxy is not set."); + Preconditions.checkNotNull(addAcls); + proxy.addBucketAcls(volumeName, name, addAcls); + addAcls.stream().filter(acl -> !acls.contains(acl)).forEach( + acls::add); + } + + /** + * Removes ACLs from the bucket. + * @param removeAcls ACLs to be removed + * @throws IOException + */ + public void removeAcls(List removeAcls) throws IOException { + Preconditions.checkNotNull(proxy, "Client proxy is not set."); + Preconditions.checkNotNull(removeAcls); + proxy.removeBucketAcls(volumeName, name, removeAcls); + acls.removeAll(removeAcls); + } + + /** + * Sets/Changes the storage type of the bucket. + * @param newStorageType Storage type to be set + * @throws IOException + */ + public void setStorageType(StorageType newStorageType) throws IOException { + Preconditions.checkNotNull(proxy, "Client proxy is not set."); + Preconditions.checkNotNull(newStorageType); + proxy.setBucketStorageType(volumeName, name, newStorageType); + storageType = newStorageType; + } + + /** + * Enable/Disable versioning of the bucket. + * @param newVersioning + * @throws IOException + */ + public void setVersioning(Boolean newVersioning) throws IOException { + Preconditions.checkNotNull(proxy, "Client proxy is not set."); + Preconditions.checkNotNull(newVersioning); + proxy.setBucketVersioning(volumeName, name, newVersioning); + versioning = newVersioning; + } + + /** + * Creates a new key in the bucket. + * @param key Name of the key to be created. + * @param size Size of the data the key will point to. + * @return OzoneOutputStream to which the data has to be written. 
+ * @throws IOException + */ + public OzoneOutputStream createKey(String key, long size)throws IOException { + Preconditions.checkNotNull(proxy, "Client proxy is not set."); + Preconditions.checkNotNull(key); + return proxy.createKey(volumeName, name, key, size); + } + + /** + * Reads an existing key from the bucket. + * @param key Name of the key to be read. + * @return OzoneInputStream the stream using which the data can be read. + * @throws IOException + */ + public OzoneInputStream readKey(String key) throws IOException { + Preconditions.checkNotNull(proxy, "Client proxy is not set."); + Preconditions.checkNotNull(key); + return proxy.getKey(volumeName, name, key); + } + + /** + * Returns information about the key. + * @param key Name of the key. + * @return OzoneKey Information about the key. + * @throws IOException + */ + public OzoneKey getKey(String key) throws IOException { + Preconditions.checkNotNull(proxy, "Client proxy is not set."); + Preconditions.checkNotNull(key); + return proxy.getKeyDetails(volumeName, name, key); + } + + /** + * Deletes key from the bucket. + * @param key Name of the key to be deleted. + * @throws IOException + */ + public void deleteKey(String key) throws IOException { + Preconditions.checkNotNull(proxy, "Client proxy is not set."); + Preconditions.checkNotNull(key); + proxy.deleteKey(volumeName, name, key); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java index a7808d8d3de..b7c3a11d850 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java @@ -1,415 +1,101 @@ /** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.ozone.client; -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConsts.Versioning; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import java.io.Closeable; import java.io.IOException; -import java.util.Iterator; -import java.util.List; /** - * OzoneClient can connect to a Ozone Cluster and + * OzoneClient connects to Ozone Cluster and * perform basic operations. */ -public interface OzoneClient { +public class OzoneClient implements Closeable { + + /* + * OzoneClient connects to Ozone Cluster and + * perform basic operations. + * + * +-------------+ +---+ +-------------------------------------+ + * | OzoneClient | --> | C | | Object Store | + * |_____________| | l | | +-------------------------------+ | + * | i | | | Volume(s) | | + * | e | | | +------------------------+ | | + * | n | | | | Bucket(s) | | | + * | t | | | | +------------------+ | | | + * | | | | | | Key -> Value (s) | | | | + * | P |-->| | | | | | | | + * | r | | | | |__________________| | | | + * | o | | | | | | | + * | t | | | |________________________| | | + * | o | | | | | + * | c | | |_______________________________| | + * | o | | | + * | l | |_____________________________________| + * |___| + * Example: + * ObjectStore store = client.getObjectStore(); + * store.createVolume(“volume one”, VolumeArgs); + * volume.setQuota(“10 GB”); + * OzoneVolume volume = store.getVolume(“volume one”); + * volume.createBucket(“bucket one”, BucketArgs); + * bucket.setVersioning(true); + * OzoneOutputStream os = bucket.createKey(“key one”, 1024); + * os.write(byte[]); + * os.close(); + * OzoneInputStream is = bucket.readKey(“key one”); + * is.read(); + * is.close(); + * bucket.deleteKey(“key one”); + * volume.deleteBucket(“bucket one”); + * store.deleteVolume(“volume one”); + * client.close(); + */ + + private final ClientProtocol proxy; + private final ObjectStore objectStore; /** - * Creates a new Volume. - * - * @param volumeName Name of the Volume - * + * Creates a new OzoneClient object, generally constructed + * using {@link OzoneClientFactory}. + * @param proxy + */ + public OzoneClient(ClientProtocol proxy) { + this.proxy = proxy; + this.objectStore = new ObjectStore(this.proxy); + } + + /** + * Returns the object store associated with the Ozone Cluster. 
+ * @return ObjectStore + */ + public ObjectStore getObjectStore() { + return objectStore; + } + + /** + * Closes the client and all the underlying resources. * @throws IOException */ - void createVolume(String volumeName) - throws IOException; - - /** - * Creates a new Volume, with owner set. - * - * @param volumeName Name of the Volume - * @param owner Owner to be set for Volume - * - * @throws IOException - */ - void createVolume(String volumeName, String owner) - throws IOException; - - /** - * Creates a new Volume, with owner and quota set. - * - * @param volumeName Name of the Volume - * @param owner Owner to be set for Volume - * @param acls ACLs to be added to the Volume - * - * @throws IOException - */ - void createVolume(String volumeName, String owner, - OzoneAcl... acls) - throws IOException; - - /** - * Creates a new Volume, with owner and quota set. - * - * @param volumeName Name of the Volume - * @param owner Owner to be set for Volume - * @param quota Volume Quota - * - * @throws IOException - */ - void createVolume(String volumeName, String owner, - long quota) - throws IOException; - - /** - * Creates a new Volume, with owner and quota set. - * - * @param volumeName Name of the Volume - * @param owner Owner to be set for Volume - * @param quota Volume Quota - * @param acls ACLs to be added to the Volume - * - * @throws IOException - */ - void createVolume(String volumeName, String owner, - long quota, OzoneAcl... acls) - throws IOException; - - /** - * Sets the owner of the volume. - * - * @param volumeName Name of the Volume - * @param owner to be set for the Volume - * - * @throws IOException - */ - void setVolumeOwner(String volumeName, String owner) throws IOException; - - /** - * Set Volume Quota. - * - * @param volumeName Name of the Volume - * @param quota Quota to be set for the Volume - * - * @throws IOException - */ - void setVolumeQuota(String volumeName, long quota) - throws IOException; - - /** - * Returns {@link OzoneVolume}. - * - * @param volumeName Name of the Volume - * - * @return KsmVolumeArgs - * - * @throws OzoneVolume - * */ - OzoneVolume getVolumeDetails(String volumeName) - throws IOException; - - /** - * Checks if a Volume exists and the user with a role specified has access - * to the Volume. - * - * @param volumeName Name of the Volume - * @param acl requested acls which needs to be checked for access - * - * @return Boolean - True if the user with a role can access the volume. - * This is possible for owners of the volume and admin users - * - * @throws IOException - */ - boolean checkVolumeAccess(String volumeName, OzoneAcl acl) - throws IOException; - - /** - * Deletes an Empty Volume. - * - * @param volumeName Name of the Volume - * - * @throws IOException - */ - void deleteVolume(String volumeName) throws IOException; - - /** - * Returns the List of Volumes owned by current user. - * - * @param volumePrefix Volume prefix to match - * - * @return KsmVolumeArgs Iterator - * - * @throws IOException - */ - Iterator listVolumes(String volumePrefix) - throws IOException; - - /** - * Returns the List of Volumes owned by the specific user. - * - * @param volumePrefix Volume prefix to match - * @param user User Name - * - * @return KsmVolumeArgs Iterator - * - * @throws IOException - */ - Iterator listVolumes(String volumePrefix, String user) - throws IOException; - - /** - * Creates a new Bucket in the Volume. 
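
A compact sketch of the key round-trip described in the Example block of the class comment above; it assumes an OzoneBucket has already been obtained from its volume, and the key name and payload are illustrative.

// Illustrative sketch, not part of the patch: write, stat, read and delete one key.
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

public final class KeyRoundTripSketch {
  static void roundTrip(OzoneBucket bucket) throws IOException {
    byte[] data = "hello ozone".getBytes(StandardCharsets.UTF_8);

    OzoneOutputStream out = bucket.createKey("key-one", data.length);
    out.write(data);
    out.close();

    OzoneKey metadata = bucket.getKey("key-one");
    System.out.println(metadata.getName() + ": " + metadata.getDataSize() + " bytes");

    OzoneInputStream in = bucket.readKey("key-one");
    byte[] read = new byte[data.length];
    int n = in.read(read);          // payload is tiny, a single read suffices
    in.close();

    bucket.deleteKey("key-one");
  }
}
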
- * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * - * @throws IOException - */ - void createBucket(String volumeName, String bucketName) - throws IOException; - - /** - * Creates a new Bucket in the Volume, with versioning set. - * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param versioning Bucket versioning - * - * @throws IOException - */ - void createBucket(String volumeName, String bucketName, - Versioning versioning) - throws IOException; - - /** - * Creates a new Bucket in the Volume, with storage type set. - * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param storageType StorageType for the Bucket - * - * @throws IOException - */ - void createBucket(String volumeName, String bucketName, - StorageType storageType) - throws IOException; - - /** - * Creates a new Bucket in the Volume, with ACLs set. - * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param acls OzoneAcls for the Bucket - * - * @throws IOException - */ - void createBucket(String volumeName, String bucketName, - OzoneAcl... acls) - throws IOException; - - - /** - * Creates a new Bucket in the Volume, with versioning - * storage type and ACLs set. - * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param storageType StorageType for the Bucket - * - * @throws IOException - */ - void createBucket(String volumeName, String bucketName, - Versioning versioning, - StorageType storageType, OzoneAcl... acls) - throws IOException; - - /** - * Adds or Removes ACLs from a Bucket. - * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * - * @throws IOException - */ - void addBucketAcls(String volumeName, String bucketName, - List addAcls) - throws IOException; - - /** - * Adds or Removes ACLs from a Bucket. - * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * - * @throws IOException - */ - void removeBucketAcls(String volumeName, String bucketName, - List removeAcls) - throws IOException; - - - /** - * Enables or disables Bucket Versioning. - * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * - * @throws IOException - */ - void setBucketVersioning(String volumeName, String bucketName, - Versioning versioning) - throws IOException; - - /** - * Sets the Storage Class of a Bucket. - * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * - * @throws IOException - */ - void setBucketStorageType(String volumeName, String bucketName, - StorageType storageType) - throws IOException; - - /** - * Deletes a bucket if it is empty. - * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * - * @throws IOException - */ - void deleteBucket(String volumeName, String bucketName) - throws IOException; - - /** - * true if the bucket exists and user has read access - * to the bucket else throws Exception. - * - * @param volumeName Name of the Volume - * - * @throws IOException - */ - void checkBucketAccess(String volumeName, String bucketName) - throws IOException; - - /** - * Returns {@link OzoneBucket}. 
- * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * - * @return OzoneBucket - * - * @throws IOException - */ - OzoneBucket getBucketDetails(String volumeName, String bucketName) - throws IOException; - - /** - * Returns the List of Buckets in the Volume. - * - * @param volumeName Name of the Volume - * @param bucketPrefix Bucket prefix to match - * - * @return KsmVolumeArgs Iterator - * - * @throws IOException - */ - Iterator listBuckets(String volumeName, String bucketPrefix) - throws IOException; - - /** - * Writes a key in an existing bucket. - * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param size Size of the data - * - * @return OutputStream - * - */ - OzoneOutputStream createKey(String volumeName, String bucketName, - String keyName, long size) - throws IOException; - - /** - * Reads a key from an existing bucket. - * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * - * @return LengthInputStream - * - * @throws IOException - */ - OzoneInputStream getKey(String volumeName, String bucketName, String keyName) - throws IOException; - - - /** - * Deletes an existing key. - * - * @param volumeName Name of the Volume - * - * @throws IOException - */ - void deleteKey(String volumeName, String bucketName, String keyName) - throws IOException; - - - /** - * Returns list of {@link OzoneKey} in Volume/Bucket. - * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * - * @return OzoneKey - * - * @throws IOException - */ - List listKeys(String volumeName, String bucketName, - String keyPrefix) - throws IOException; - - - /** - * Get OzoneKey. - * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param keyName Key name - * - * @return OzoneKey - * - * @throws IOException - */ - OzoneKey getKeyDetails(String volumeName, String bucketName, - String keyName) - throws IOException; - - /** - * Close and release the resources. - */ - void close() throws IOException; + @Override + public void close() throws IOException { + proxy.close(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java index b098be9b1a6..580cd110d5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java @@ -20,64 +20,94 @@ package org.apache.hadoop.ozone.client; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ozone.OzoneConfiguration; -import org.apache.hadoop.ozone.client.rest.OzoneRestClient; -import org.apache.hadoop.ozone.client.rpc.OzoneRpcClient; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.Proxy; + +import static org.apache.hadoop.ozone.OzoneConfigKeys + .OZONE_CLIENT_PROTOCOL; +import static org.apache.hadoop.ozone.OzoneConfigKeys + .OZONE_CLIENT_PROTOCOL_REST; +import static org.apache.hadoop.ozone.OzoneConfigKeys + .OZONE_CLIENT_PROTOCOL_RPC; /** * Factory class to create different types of OzoneClients. 
+ * Based on ozone.client.protocol, it decides which
+ * protocol to use for the communication.
+ * Default value is
+ * org.apache.hadoop.ozone.client.rpc.RpcClient.
+ * OzoneClientFactory constructs a proxy using + * {@link OzoneClientInvocationHandler} + * and creates OzoneClient instance with it. + * {@link OzoneClientInvocationHandler} dispatches the call to + * underlying {@link ClientProtocol} implementation. */ public final class OzoneClientFactory { + private enum ClientType { + RPC, REST + } + /** * Private constructor, class is not meant to be initialized. */ private OzoneClientFactory(){} + private static final Logger LOG = LoggerFactory.getLogger( + OzoneClientFactory.class); + private static Configuration configuration; /** - * Returns an OzoneClient which will use RPC protocol to perform - * client operations. - * + * Returns an OzoneClient which will use protocol defined through + * ozone.client.protocol to perform client operations. * @return OzoneClient * @throws IOException */ public static OzoneClient getClient() throws IOException { - //TODO: get client based on ozone.client.protocol - return new OzoneRpcClient(getConfiguration()); + return getClient(null); } /** * Returns an OzoneClient which will use RPC protocol to perform * client operations. - * * @return OzoneClient * @throws IOException */ public static OzoneClient getRpcClient() throws IOException { - return new OzoneRpcClient(getConfiguration()); + return getClient(ClientType.RPC); } /** - * Returns an OzoneClient which will use RPC protocol to perform + * Returns an OzoneClient which will use REST protocol to perform * client operations. - * * @return OzoneClient * @throws IOException */ public static OzoneClient getRestClient() throws IOException { - return new OzoneRestClient(getConfiguration()); + return getClient(ClientType.REST); } /** - * Sets the configuration, which will be used while creating OzoneClient. - * - * @param conf + * Returns OzoneClient with protocol type set base on ClientType. + * @param clientType + * @return OzoneClient + * @throws IOException */ - public static void setConfiguration(Configuration conf) { - configuration = conf; + private static OzoneClient getClient(ClientType clientType) + throws IOException { + OzoneClientInvocationHandler clientHandler = + new OzoneClientInvocationHandler(getProtocolClass(clientType)); + ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance( + OzoneClientInvocationHandler.class.getClassLoader(), + new Class[]{ClientProtocol.class}, clientHandler); + return new OzoneClient(proxy); } /** @@ -92,4 +122,58 @@ public final class OzoneClientFactory { } return configuration; } + + /** + * Based on the clientType, client protocol instance is created. + * If clientType is null, ozone.client.protocol property + * will be used to decide the protocol to be used. 
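
A sketch of selecting the wire protocol explicitly through ozone.client.protocol; only keys and methods introduced in this patch are used, and choosing RestClient here is purely illustrative.

// Illustrative sketch, not part of the patch: point the factory at the REST implementation.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.client.rest.RestClient;

import java.io.IOException;

public class ProtocolSelectionSketch {
  public static OzoneClient restBackedClient() throws IOException {
    Configuration conf = new OzoneConfiguration();
    // The factory reads this key with Configuration#getClass, so setClass keeps it type-safe.
    conf.setClass(OzoneConfigKeys.OZONE_CLIENT_PROTOCOL,
        RestClient.class, ClientProtocol.class);
    OzoneClientFactory.setConfiguration(conf);
    return OzoneClientFactory.getClient();   // the proxy now dispatches to RestClient
  }
}

The getRpcClient() and getRestClient() shortcuts above reach the same implementations without touching the configuration.
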
+ * @param clientType type of client protocol to be created + * @return ClientProtocol implementation + * @throws IOException + */ + private static ClientProtocol getProtocolClass(ClientType clientType) + throws IOException { + Class protocolClass = null; + if(clientType != null) { + switch (clientType) { + case RPC: + protocolClass = OZONE_CLIENT_PROTOCOL_RPC; + break; + case REST: + protocolClass = OZONE_CLIENT_PROTOCOL_REST; + break; + default: + LOG.warn("Invalid ClientProtocol type, falling back to RPC."); + protocolClass = OZONE_CLIENT_PROTOCOL_RPC; + break; + } + } else { + protocolClass = (Class) + getConfiguration().getClass( + OZONE_CLIENT_PROTOCOL, OZONE_CLIENT_PROTOCOL_RPC); + } + try { + Constructor ctor = + protocolClass.getConstructor(Configuration.class); + return ctor.newInstance(getConfiguration()); + } catch (Exception e) { + final String message = "Couldn't create protocol " + protocolClass; + LOG.warn(message, e); + if (e.getCause() instanceof IOException) { + throw (IOException) e.getCause(); + } else { + throw new IOException(message, e); + } + } + } + + /** + * Sets the configuration, which will be used while creating OzoneClient. + * + * @param conf + */ + public static void setConfiguration(Configuration conf) { + configuration = conf; + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java new file mode 100644 index 00000000000..3051e2dd1c8 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.client; + +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; + +/** + * Invocation Handler for ozone client which dispatches the call to underlying + * ClientProtocol implementation. + */ +public class OzoneClientInvocationHandler implements InvocationHandler { + + + private static final Logger LOG = LoggerFactory.getLogger(OzoneClient.class); + private final ClientProtocol target; + + /** + * Constructs OzoneClientInvocationHandler with the proxy. + * @param target proxy to be used for method invocation. 
+ */ + public OzoneClientInvocationHandler(ClientProtocol target) { + this.target = target; + } + + @Override + public Object invoke(Object proxy, Method method, Object[] args) + throws Throwable { + LOG.trace("Invoking method {} on proxy {}", method, proxy); + try { + long startTime = Time.monotonicNow(); + Object result = method.invoke(target, args); + LOG.debug("Call: {} took {} ms", method, + Time.monotonicNow() - startTime); + return result; + } catch(InvocationTargetException iEx) { + throw iEx.getCause(); + } + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java index 9390a8559aa..cc3632dedc3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java @@ -1,18 +1,19 @@ /** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.ozone.client; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java index 368736ae38c..d6a591f9908 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java @@ -1,28 +1,23 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.ozone.client; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; - -import java.util.List; - /** * A class that encapsulates OzoneKey. */ @@ -39,27 +34,22 @@ public class OzoneKey { /** * Name of the Key. */ - private final String keyName; + private final String name; /** * Size of the data. */ private final long dataSize; - /** - * All the locations of this key, in an ordered list. - */ - private final List keyLocations; /** * Constructs OzoneKey from KsmKeyInfo. 
* - * @param ksmKeyInfo */ - public OzoneKey(KsmKeyInfo ksmKeyInfo) { - this.volumeName = ksmKeyInfo.getVolumeName(); - this.bucketName = ksmKeyInfo.getBucketName(); - this.keyName = ksmKeyInfo.getKeyName(); - this.dataSize = ksmKeyInfo.getDataSize(); - this.keyLocations = ksmKeyInfo.getKeyLocationList(); + public OzoneKey(String volumeName, String bucketName, + String keyName, long size) { + this.volumeName = volumeName; + this.bucketName = bucketName; + this.name = keyName; + this.dataSize = size; } /** @@ -85,8 +75,8 @@ public class OzoneKey { * * @return keyName */ - public String getKeyName() { - return keyName; + public String getName() { + return name; } /** @@ -98,12 +88,4 @@ public class OzoneKey { return dataSize; } - /** - * Retruns the list of the key locations. - * - * @return key locations - */ - public List getKeyLocations() { - return keyLocations; - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneQuota.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneQuota.java new file mode 100644 index 00000000000..bff529b51b7 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneQuota.java @@ -0,0 +1,198 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.client; + +import org.apache.hadoop.ozone.OzoneConsts; + + +/** + * represents an OzoneQuota Object that can be applied to + * a storage volume. + */ +public class OzoneQuota { + + public static final String OZONE_QUOTA_BYTES = "BYTES"; + public static final String OZONE_QUOTA_MB = "MB"; + public static final String OZONE_QUOTA_GB = "GB"; + public static final String OZONE_QUOTA_TB = "TB"; + + private Units unit; + private int size; + + /** Quota Units.*/ + public enum Units {UNDEFINED, BYTES, MB, GB, TB} + + /** + * Returns size. + * + * @return int + */ + public int getSize() { + return size; + } + + /** + * Returns Units. + * + * @return Unit in MB, GB or TB + */ + public Units getUnit() { + return unit; + } + + /** + * Constructs a default Quota object. + */ + public OzoneQuota() { + this.size = 0; + this.unit = Units.UNDEFINED; + } + + /** + * Constructor for Ozone Quota. + * + * @param size - Integer Size + * @param unit MB, GB or TB + */ + public OzoneQuota(int size, Units unit) { + this.size = size; + this.unit = unit; + } + + /** + * Formats a quota as a string. + * + * @param quota the quota to format + * @return string representation of quota + */ + public static String formatQuota(OzoneQuota quota) { + return String.valueOf(quota.size) + quota.unit; + } + + /** + * Parses a user provided string and returns the + * Quota Object. 
+ * + * @param quotaString Quota String + * + * @return OzoneQuota object + * + * @throws IllegalArgumentException + */ + public static OzoneQuota parseQuota(String quotaString) + throws IllegalArgumentException { + + if ((quotaString == null) || (quotaString.isEmpty())) { + throw new IllegalArgumentException( + "Quota string cannot be null or empty."); + } + + String uppercase = quotaString.toUpperCase().replaceAll("\\s+", ""); + String size = ""; + int nSize; + Units currUnit = Units.MB; + Boolean found = false; + if (uppercase.endsWith(OZONE_QUOTA_MB)) { + size = uppercase + .substring(0, uppercase.length() - OZONE_QUOTA_MB.length()); + currUnit = Units.MB; + found = true; + } + + if (uppercase.endsWith(OZONE_QUOTA_GB)) { + size = uppercase + .substring(0, uppercase.length() - OZONE_QUOTA_GB.length()); + currUnit = Units.GB; + found = true; + } + + if (uppercase.endsWith(OZONE_QUOTA_TB)) { + size = uppercase + .substring(0, uppercase.length() - OZONE_QUOTA_TB.length()); + currUnit = Units.TB; + found = true; + } + + if (uppercase.endsWith(OZONE_QUOTA_BYTES)) { + size = uppercase + .substring(0, uppercase.length() - OZONE_QUOTA_BYTES.length()); + currUnit = Units.BYTES; + found = true; + } + + if (!found) { + throw new IllegalArgumentException( + "Quota unit not recognized. Supported values are BYTES, MB, GB and " + + "TB."); + } + + nSize = Integer.parseInt(size); + if (nSize < 0) { + throw new IllegalArgumentException("Quota cannot be negative."); + } + + return new OzoneQuota(nSize, currUnit); + } + + + /** + * Returns size in Bytes or -1 if there is no Quota. + */ + public long sizeInBytes() { + switch (this.unit) { + case BYTES: + return this.getSize(); + case MB: + return this.getSize() * OzoneConsts.MB; + case GB: + return this.getSize() * OzoneConsts.GB; + case TB: + return this.getSize() * OzoneConsts.TB; + case UNDEFINED: + default: + return -1; + } + } + + /** + * Returns OzoneQuota corresponding to size in bytes. + * + * @param sizeInBytes size in bytes to be converted + * + * @return OzoneQuota object + */ + public static OzoneQuota getOzoneQuota(long sizeInBytes) { + long size; + Units unit; + if (sizeInBytes % OzoneConsts.TB == 0) { + size = sizeInBytes / OzoneConsts.TB; + unit = Units.TB; + } else if (sizeInBytes % OzoneConsts.GB == 0) { + size = sizeInBytes / OzoneConsts.GB; + unit = Units.GB; + } else if (sizeInBytes % OzoneConsts.MB == 0) { + size = sizeInBytes / OzoneConsts.MB; + unit = Units.MB; + } else { + size = sizeInBytes; + unit = Units.BYTES; + } + return new OzoneQuota((int)size, unit); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java index 9c2ec3d8fee..575fb25035f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java @@ -1,81 +1,73 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. 
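
A brief sketch of how the quota helpers above round-trip a human-readable quota string; the value is illustrative.

// Illustrative sketch, not part of the patch: parse a quota string, inspect it, convert back.
import org.apache.hadoop.ozone.client.OzoneQuota;

public class QuotaSketch {
  public static void main(String[] args) {
    OzoneQuota quota = OzoneQuota.parseQuota("10 GB");       // whitespace is stripped, unit matched
    long bytes = quota.sizeInBytes();                        // 10 * OzoneConsts.GB
    System.out.println(OzoneQuota.formatQuota(quota));       // prints "10GB"
    System.out.println(OzoneQuota.getOzoneQuota(bytes).getUnit());  // prints GB
  }
}
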
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.ozone.client; -import org.apache.hadoop.ozone.ksm.helpers.KsmOzoneAclMap; -import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs; +import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; + +import java.io.IOException; +import java.util.List; /** * A class that encapsulates OzoneVolume. */ public class OzoneVolume { + /** + * Name of the Volume. + */ + private final String name; + /** * Admin Name of the Volume. */ - private final String adminName; + private String admin; /** * Owner of the Volume. */ - private final String ownerName; - /** - * Name of the Volume. - */ - private final String volumeName; + private String owner; /** * Quota allocated for the Volume. */ - private final long quotaInBytes; + private long quotaInBytes; /** * Volume ACLs. */ - private final KsmOzoneAclMap aclMap; + private List acls; + + private ClientProtocol proxy; /** - * Constructs OzoneVolume from KsmVolumeArgs. - * - * @param ksmVolumeArgs + * Constructs OzoneVolume. */ - public OzoneVolume(KsmVolumeArgs ksmVolumeArgs) { - this.adminName = ksmVolumeArgs.getAdminName(); - this.ownerName = ksmVolumeArgs.getOwnerName(); - this.volumeName = ksmVolumeArgs.getVolume(); - this.quotaInBytes = ksmVolumeArgs.getQuotaInBytes(); - this.aclMap = ksmVolumeArgs.getAclMap(); + public OzoneVolume(String name, String admin, String owner, + long quotaInBytes, List acls) { + this.name = name; + this.admin = admin; + this.owner = owner; + this.quotaInBytes = quotaInBytes; + this.acls = acls; } - /** - * Returns Volume's admin name. - * - * @return adminName - */ - public String getAdminName() { - return adminName; - } - - /** - * Returns Volume's owner name. - * - * @return ownerName - */ - public String getOwnerName() { - return ownerName; + public void setClientProxy(ClientProtocol clientProxy) { + this.proxy = clientProxy; } /** @@ -83,8 +75,26 @@ public class OzoneVolume { * * @return volumeName */ - public String getVolumeName() { - return volumeName; + public String getName() { + return name; + } + + /** + * Returns Volume's admin name. + * + * @return adminName + */ + public String getAdmin() { + return admin; + } + + /** + * Returns Volume's owner name. + * + * @return ownerName + */ + public String getOwner() { + return owner; } /** @@ -101,7 +111,82 @@ public class OzoneVolume { * * @return aclMap */ - public KsmOzoneAclMap getAclMap() { - return aclMap; + public List getAcls() { + return acls; + } + + /** + * Sets/Changes the owner of this Volume. 
+ * @param owner new owner + * @throws IOException + */ + public void setOwner(String owner) throws IOException { + Preconditions.checkNotNull(proxy, "Client proxy is not set."); + Preconditions.checkNotNull(owner); + proxy.setVolumeOwner(name, owner); + this.owner = owner; + } + + /** + * Sets/Changes the quota of this Volume. + * @param quota new quota + * @throws IOException + */ + public void setQuota(OzoneQuota quota) throws IOException { + Preconditions.checkNotNull(proxy, "Client proxy is not set."); + Preconditions.checkNotNull(quota); + proxy.setVolumeQuota(name, quota); + this.quotaInBytes = quota.sizeInBytes(); + } + + /** + * Creates a new Bucket in this Volume, with default values. + * @param bucketName Name of the Bucket + * @throws IOException + */ + public void createBucket(String bucketName) + throws IOException { + Preconditions.checkNotNull(proxy, "Client proxy is not set."); + Preconditions.checkNotNull(bucketName); + proxy.createBucket(name, bucketName); + } + + /** + * Creates a new Bucket in this Volume, with properties set in bucketArgs. + * @param bucketName Name of the Bucket + * @param bucketArgs Properties to be set + * @throws IOException + */ + public void createBucket(String bucketName, BucketArgs bucketArgs) + throws IOException { + Preconditions.checkNotNull(proxy, "Client proxy is not set."); + Preconditions.checkNotNull(bucketName); + Preconditions.checkNotNull(bucketArgs); + proxy.createBucket(name, bucketName, bucketArgs); + } + + /** + * Get the Bucket from this Volume. + * @param bucketName Name of the Bucket + * @return OzoneBucket + * @throws IOException + */ + public OzoneBucket getBucket(String bucketName) throws IOException { + Preconditions.checkNotNull(proxy, "Client proxy is not set."); + Preconditions.checkNotNull(bucketName); + OzoneBucket bucket = proxy.getBucketDetails(name, bucketName); + bucket.setClientProxy(proxy); + return bucket; + } + + /** + * Deletes the Bucket from this Volume. + * @param bucketName Name of the Bucket + * @throws IOException + */ + public void deleteBucket(String bucketName) throws IOException { + Preconditions.checkNotNull(proxy, "Client proxy is not set."); + Preconditions.checkNotNull(bucketName); + proxy.deleteBucket(name, bucketName); } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java new file mode 100644 index 00000000000..f1aa03108a5 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java @@ -0,0 +1,128 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
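
A sketch that combines the OzoneVolume bucket operations above with the BucketArgs builder added earlier in this patch; the bucket name, storage type and versioning flag are illustrative.

// Illustrative sketch, not part of the patch: create a bucket with explicit properties.
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneVolume;

import java.io.IOException;

public class BucketArgsSketch {
  static OzoneBucket createSsdBucket(OzoneVolume volume) throws IOException {
    BucketArgs args = BucketArgs.newBuilder()
        .setStorageType(StorageType.SSD)
        .setIsVersionEnabled(true)
        .build();
    volume.createBucket("bucket-ssd", args);
    return volume.getBucket("bucket-ssd");   // getBucket attaches the client proxy
  }
}
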
+ */ + +package org.apache.hadoop.ozone.client; + +import org.apache.hadoop.ozone.OzoneAcl; + +import java.io.IOException; +import java.util.List; + +/** + * This class encapsulates the arguments that are + * required for creating a volume. + */ +public final class VolumeArgs { + + private final String admin; + private final String owner; + private final String quota; + private final List acls; + + /** + * Private constructor, constructed via builder. + * @param admin Administrator's name. + * @param owner Volume owner's name + * @param quota Volume Quota. + * @param acls User to access rights map. + */ + private VolumeArgs(String admin, String owner, + String quota, List acls) { + this.admin = admin; + this.owner = owner; + this.quota = quota; + this.acls = acls; + } + + /** + * Returns the Admin Name. + * @return String. + */ + public String getAdmin() { + return admin; + } + + /** + * Returns the owner Name. + * @return String + */ + public String getOwner() { + return owner; + } + + /** + * Returns Volume Quota. + * @return Quota. + */ + public String getQuota() { + return quota; + } + + public List getAcls() { + return acls; + } + /** + * Returns new builder class that builds a KsmVolumeArgs. + * + * @return Builder + */ + public static VolumeArgs.Builder newBuilder() { + return new VolumeArgs.Builder(); + } + + /** + * Builder for KsmVolumeArgs. + */ + public static class Builder { + private String adminName; + private String ownerName; + private String volumeQuota; + private List listOfAcls; + + + public VolumeArgs.Builder setAdmin(String admin) { + this.adminName = admin; + return this; + } + + public VolumeArgs.Builder setOwner(String owner) { + this.ownerName = owner; + return this; + } + + public VolumeArgs.Builder setQuota(String quota) { + this.volumeQuota = quota; + return this; + } + + public VolumeArgs.Builder setAcls(List acls) + throws IOException { + this.listOfAcls = acls; + return this; + } + + /** + * Constructs a CreateVolumeArgument. + * @return CreateVolumeArgs. + */ + public VolumeArgs build() { + return new VolumeArgs(adminName, ownerName, volumeQuota, listOfAcls); + } + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java new file mode 100644 index 00000000000..54aa3f9bd8f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -0,0 +1,296 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
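VolumeArgs mirrors the BucketArgs builder pattern; per the RpcClient changes later in this patch, admin and owner default to the current user and an unset quota falls back to OzoneConsts.MAX_QUOTA_IN_BYTES. A hedged sketch of building the arguments against any ClientProtocol implementation (values are illustrative):

    // Illustrative only; "proxy" may be an RpcClient or RestClient instance.
    void createVolumeExample(ClientProtocol proxy) throws IOException {
      VolumeArgs args = VolumeArgs.newBuilder()
          .setAdmin("hdfs")      // optional, defaults to the current user
          .setOwner("bilbo")     // optional, defaults to the current user
          .setQuota("100TB")     // string form, parsed with OzoneQuota.parseQuota
          .build();
      proxy.createVolume("vol-sample", args);
    }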
+ */ + +package org.apache.hadoop.ozone.client.protocol; + +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.client.OzoneQuota; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.VolumeArgs; +import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; + +/** + * An implementer of this interface is capable of connecting to Ozone Cluster + * and perform client operations. The protocol used for communication is + * determined by the implementation class specified by + * property ozone.client.protocol. The build-in implementation + * includes: {@link org.apache.hadoop.ozone.client.rpc.RpcClient} for RPC and + * {@link org.apache.hadoop.ozone.client.rest.RestClient} for REST. + */ +public interface ClientProtocol { + + /** + * Creates a new Volume. + * @param volumeName Name of the Volume + * @throws IOException + */ + void createVolume(String volumeName) + throws IOException; + + /** + * Creates a new Volume with properties set in VolumeArgs. + * @param volumeName Name of the Volume + * @param args Properties to be set for the Volume + * @throws IOException + */ + void createVolume(String volumeName, VolumeArgs args) + throws IOException; + + /** + * Sets the owner of volume. + * @param volumeName Name of the Volume + * @param owner to be set for the Volume + * @throws IOException + */ + void setVolumeOwner(String volumeName, String owner) throws IOException; + + /** + * Set Volume Quota. + * @param volumeName Name of the Volume + * @param quota Quota to be set for the Volume + * @throws IOException + */ + void setVolumeQuota(String volumeName, OzoneQuota quota) + throws IOException; + + /** + * Returns {@link OzoneVolume}. + * @param volumeName Name of the Volume + * @return {@link OzoneVolume} + * @throws IOException + * */ + OzoneVolume getVolumeDetails(String volumeName) + throws IOException; + + /** + * Checks if a Volume exists and the user with a role specified has access + * to the Volume. + * @param volumeName Name of the Volume + * @param acl requested acls which needs to be checked for access + * @return Boolean - True if the user with a role can access the volume. + * This is possible for owners of the volume and admin users + * @throws IOException + */ + boolean checkVolumeAccess(String volumeName, OzoneAcl acl) + throws IOException; + + /** + * Deletes an empty Volume. + * @param volumeName Name of the Volume + * @throws IOException + */ + void deleteVolume(String volumeName) throws IOException; + + /** + * Returns the List of Volumes owned by current user. + * @param volumePrefix Volume prefix to match + * @return {@link OzoneVolume} Iterator + * @throws IOException + */ + Iterator listVolumes(String volumePrefix) + throws IOException; + + /** + * Returns the List of Volumes owned by the specific user. + * @param volumePrefix Volume prefix to match + * @param user User Name + * @return {@link OzoneVolume} Iterator + * @throws IOException + */ + Iterator listVolumes(String volumePrefix, String user) + throws IOException; + + /** + * Creates a new Bucket in the Volume. 
+ * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @throws IOException + */ + void createBucket(String volumeName, String bucketName) + throws IOException; + + /** + * Creates a new Bucket in the Volume, with properties set in BucketArgs. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param bucketArgs Bucket Arguments + * @throws IOException + */ + void createBucket(String volumeName, String bucketName, + BucketArgs bucketArgs) + throws IOException; + + /** + * Adds ACLs to the Bucket. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param addAcls ACLs to be added + * @throws IOException + */ + void addBucketAcls(String volumeName, String bucketName, + List addAcls) + throws IOException; + + /** + * Removes ACLs from a Bucket. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param removeAcls ACLs to be removed + * @throws IOException + */ + void removeBucketAcls(String volumeName, String bucketName, + List removeAcls) + throws IOException; + + + /** + * Enables or disables Bucket Versioning. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param versioning True to enable Versioning, False to disable. + * @throws IOException + */ + void setBucketVersioning(String volumeName, String bucketName, + Boolean versioning) + throws IOException; + + /** + * Sets the Storage Class of a Bucket. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param storageType StorageType to be set + * @throws IOException + */ + void setBucketStorageType(String volumeName, String bucketName, + StorageType storageType) + throws IOException; + + /** + * Deletes a bucket if it is empty. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @throws IOException + */ + void deleteBucket(String volumeName, String bucketName) + throws IOException; + + /** + * True if the bucket exists and user has read access + * to the bucket else throws Exception. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @throws IOException + */ + void checkBucketAccess(String volumeName, String bucketName) + throws IOException; + + /** + * Returns {@link OzoneBucket}. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @return {@link OzoneBucket} + * @throws IOException + */ + OzoneBucket getBucketDetails(String volumeName, String bucketName) + throws IOException; + + /** + * Returns the List of Buckets in the Volume. + * @param volumeName Name of the Volume + * @param bucketPrefix Bucket prefix to match + * @return {@link OzoneBucket} Iterator + * @throws IOException + */ + Iterator listBuckets(String volumeName, String bucketPrefix) + throws IOException; + + /** + * Writes a key in an existing bucket. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param keyName Name of the Key + * @param size Size of the data + * @return {@link OzoneOutputStream} + * + */ + OzoneOutputStream createKey(String volumeName, String bucketName, + String keyName, long size) + throws IOException; + + /** + * Reads a key from an existing bucket. 
+ * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param keyName Name of the Key + * @return {@link OzoneInputStream} + * @throws IOException + */ + OzoneInputStream getKey(String volumeName, String bucketName, String keyName) + throws IOException; + + + /** + * Deletes an existing key. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param keyName Name of the Key + * @throws IOException + */ + void deleteKey(String volumeName, String bucketName, String keyName) + throws IOException; + + + /** + * Returns list of {@link OzoneKey} in {Volume/Bucket}. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @return {@link OzoneKey} Iterator + * @throws IOException + */ + Iterator listKeys(String volumeName, String bucketName, + String keyPrefix) + throws IOException; + + + /** + * Get OzoneKey. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param keyName Key name + * @return {@link OzoneKey} + * @throws IOException + */ + OzoneKey getKeyDetails(String volumeName, String bucketName, + String keyName) + throws IOException; + + /** + * Close and release the resources. + */ + void close() throws IOException; + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java new file mode 100644 index 00000000000..f4890a1e8b8 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.client.protocol; + +/** + * This package contains Ozone client protocol library classes. + */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneRestClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneRestClient.java deleted file mode 100644 index 4955002c254..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneRestClient.java +++ /dev/null @@ -1,510 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rest; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.OzoneConsts.Versioning; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientUtils; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.ksm.KSMConfigKeys; -import org.apache.hadoop.ozone.client.rest.headers.Header; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.Time; -import org.apache.http.HttpEntity; -import org.apache.http.HttpResponse; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpRequestBase; -import org.apache.http.client.utils.URIBuilder; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.util.EntityUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.ws.rs.core.HttpHeaders; -import java.io.Closeable; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Date; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Locale; -import java.util.Set; - -import static java.net.HttpURLConnection.HTTP_CREATED; -import static java.net.HttpURLConnection.HTTP_OK; - -/** - * Ozone REST Client Implementation, it connects Ozone Handler to execute - * client calls. This uses REST protocol for the communication with server. - */ -public class OzoneRestClient implements OzoneClient, Closeable { - - private static final Logger LOG = - LoggerFactory.getLogger(OzoneRestClient.class); - - private static final String SCHEMA = "http://"; - private static final int DEFAULT_OZONE_PORT = 50070; - - private final URI uri; - private final UserGroupInformation ugi; - private final OzoneAcl.OzoneACLRights userRights; - private final OzoneAcl.OzoneACLRights groupRights; - - - /** - * Creates OzoneRpcClient instance with new OzoneConfiguration. - * - * @throws IOException - */ - public OzoneRestClient() throws IOException, URISyntaxException { - this(new OzoneConfiguration()); - } - - /** - * Creates OzoneRpcClient instance with the given configuration. 
- * - * @param conf - * - * @throws IOException - */ - public OzoneRestClient(Configuration conf) - throws IOException { - Preconditions.checkNotNull(conf); - this.ugi = UserGroupInformation.getCurrentUser(); - this.userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS, - KSMConfigKeys.OZONE_KSM_USER_RIGHTS_DEFAULT); - this.groupRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS, - KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS_DEFAULT); - - //TODO: get uri from property ozone.reset.servers - URIBuilder ozoneURI = null; - try { - ozoneURI = new URIBuilder(SCHEMA + "localhost"); - if (ozoneURI.getPort() == 0) { - ozoneURI.setPort(DEFAULT_OZONE_PORT); - } - uri = ozoneURI.build(); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void createVolume(String volumeName) - throws IOException { - createVolume(volumeName, ugi.getUserName()); - } - - @Override - public void createVolume(String volumeName, String owner) - throws IOException { - - createVolume(volumeName, owner, OzoneConsts.MAX_QUOTA_IN_BYTES, - (OzoneAcl[])null); - } - - @Override - public void createVolume(String volumeName, String owner, - OzoneAcl... acls) - throws IOException { - createVolume(volumeName, owner, OzoneConsts.MAX_QUOTA_IN_BYTES, acls); - } - - @Override - public void createVolume(String volumeName, String owner, - long quota) - throws IOException { - createVolume(volumeName, owner, quota, (OzoneAcl[])null); - } - - @Override - public void createVolume(String volumeName, String owner, - long quota, OzoneAcl... acls) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(owner); - Preconditions.checkNotNull(quota); - Preconditions.checkState(quota >= 0); - - Set aclSet = new HashSet<>(); - - if(acls != null) { - aclSet.addAll(Arrays.asList(acls)); - } - - LOG.info("Creating Volume: {}, with {} as owner and " + - "quota set to {} bytes.", volumeName, owner, quota); - HttpPost httpPost = null; - HttpEntity entity = null; - try (CloseableHttpClient httpClient = OzoneClientUtils.newHttpClient()) { - URIBuilder builder = new URIBuilder(uri); - builder.setPath("/" + volumeName); - String quotaString = quota + Header.OZONE_QUOTA_BYTES; - builder.setParameter(Header.OZONE_QUOTA_QUERY_TAG, quotaString); - httpPost = getHttpPost(owner, builder.build().toString()); - for (OzoneAcl acl : aclSet) { - httpPost.addHeader( - Header.OZONE_ACLS, Header.OZONE_ACL_ADD + " " + acl.toString()); - } - - HttpResponse response = httpClient.execute(httpPost); - entity = response.getEntity(); - int errorCode = response.getStatusLine().getStatusCode(); - if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) { - return; - } - if (entity != null) { - throw new IOException(EntityUtils.toString(entity)); - } else { - throw new IOException("Unexpected null in http payload"); - } - } catch (URISyntaxException | IllegalArgumentException ex) { - throw new IOException(ex.getMessage()); - } finally { - EntityUtils.consume(entity); - OzoneClientUtils.releaseConnection(httpPost); - } - } - - @Override - public void setVolumeOwner(String volumeName, String owner) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(owner); - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public void setVolumeQuota(String volumeName, long quota) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(quota); - Preconditions.checkState(quota >= 0); - throw new 
UnsupportedOperationException("Not yet implemented."); - } - - @Override - public OzoneVolume getVolumeDetails(String volumeName) - throws IOException { - Preconditions.checkNotNull(volumeName); - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public boolean checkVolumeAccess(String volumeName, OzoneAcl acl) - throws IOException { - Preconditions.checkNotNull(volumeName); - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public void deleteVolume(String volumeName) - throws IOException { - Preconditions.checkNotNull(volumeName); - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public Iterator listVolumes(String volumePrefix) - throws IOException { - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public Iterator listVolumes(String volumePrefix, - String user) - throws IOException { - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public void createBucket(String volumeName, String bucketName) - throws IOException { - createBucket(volumeName, bucketName, Versioning.NOT_DEFINED, - StorageType.DEFAULT, (OzoneAcl[])null); - } - - @Override - public void createBucket(String volumeName, String bucketName, - Versioning versioning) - throws IOException { - createBucket(volumeName, bucketName, versioning, - StorageType.DEFAULT, (OzoneAcl[])null); - } - - @Override - public void createBucket(String volumeName, String bucketName, - StorageType storageType) - throws IOException { - createBucket(volumeName, bucketName, Versioning.NOT_DEFINED, - storageType, (OzoneAcl[])null); - } - - @Override - public void createBucket(String volumeName, String bucketName, - OzoneAcl... acls) - throws IOException { - createBucket(volumeName, bucketName, Versioning.NOT_DEFINED, - StorageType.DEFAULT, acls); - } - - @Override - public void createBucket(String volumeName, String bucketName, - Versioning versioning, StorageType storageType, - OzoneAcl... acls) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - Preconditions.checkNotNull(versioning); - Preconditions.checkNotNull(storageType); - - String owner = ugi.getUserName(); - final List listOfAcls = new ArrayList<>(); - - //User ACL - OzoneAcl userAcl = - new OzoneAcl(OzoneAcl.OzoneACLType.USER, - owner, userRights); - listOfAcls.add(userAcl); - - //Group ACLs of the User - List userGroups = Arrays.asList(UserGroupInformation - .createRemoteUser(owner).getGroupNames()); - userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, group, groupRights))); - - //ACLs passed as argument - if(acls != null) { - Arrays.stream(acls).forEach((acl) -> listOfAcls.add(acl)); - } - - LOG.info("Creating Bucket: {}/{}, with Versioning {} and " + - "Storage Type set to {}", volumeName, bucketName, versioning, - storageType); - throw new UnsupportedOperationException("Not yet implemented."); - } - - /** - * Converts OzoneConts.Versioning enum to boolean. 
- * - * @param version - * @return corresponding boolean value - */ - private boolean getBucketVersioningProtobuf( - Versioning version) { - if(version != null) { - switch(version) { - case ENABLED: - return true; - case NOT_DEFINED: - case DISABLED: - default: - return false; - } - } - return false; - } - - @Override - public void addBucketAcls(String volumeName, String bucketName, - List addAcls) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - Preconditions.checkNotNull(addAcls); - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public void removeBucketAcls(String volumeName, String bucketName, - List removeAcls) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - Preconditions.checkNotNull(removeAcls); - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public void setBucketVersioning(String volumeName, String bucketName, - Versioning versioning) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - Preconditions.checkNotNull(versioning); - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public void setBucketStorageType(String volumeName, String bucketName, - StorageType storageType) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - Preconditions.checkNotNull(storageType); - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public void deleteBucket(String volumeName, String bucketName) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public void checkBucketAccess(String volumeName, String bucketName) - throws IOException { - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public OzoneBucket getBucketDetails(String volumeName, - String bucketName) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public Iterator listBuckets(String volumeName, - String bucketPrefix) - throws IOException { - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public OzoneOutputStream createKey(String volumeName, String bucketName, - String keyName, long size) - throws IOException { - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public OzoneInputStream getKey(String volumeName, String bucketName, - String keyName) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - Preconditions.checkNotNull(keyName); - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public void deleteKey(String volumeName, String bucketName, - String keyName) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - Preconditions.checkNotNull(keyName); - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public List listKeys(String volumeName, String bucketName, - String keyPrefix) - throws IOException { - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public 
OzoneKey getKeyDetails(String volumeName, String bucketName, - String keyName) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - Preconditions.checkNotNull(keyName); - throw new UnsupportedOperationException("Not yet implemented."); - } - - /** - * Converts Versioning to boolean. - * - * @param version - * @return corresponding boolean value - */ - private boolean getBucketVersioningFlag( - Versioning version) { - if(version != null) { - switch(version) { - case ENABLED: - return true; - case DISABLED: - case NOT_DEFINED: - default: - return false; - } - } - return false; - } - - /** - * Returns a standard HttpPost Object to use for ozone post requests. - * - * @param user - If the use is being made on behalf of user, that user - * @param uriString - UriString - * @return HttpPost - */ - public HttpPost getHttpPost(String user, String uriString) { - HttpPost httpPost = new HttpPost(uriString); - addOzoneHeaders(httpPost); - if (user != null) { - httpPost.addHeader(Header.OZONE_USER, user); - } - return httpPost; - } - - /** - * Add Ozone Headers. - * - * @param httpRequest - Http Request - */ - private void addOzoneHeaders(HttpRequestBase httpRequest) { - SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US); - - httpRequest.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - httpRequest.addHeader(HttpHeaders.DATE, - format.format(new Date(Time.monotonicNow()))); - httpRequest.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " + - ugi.getUserName()); - } - - @Override - public void close() throws IOException { - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java new file mode 100644 index 00000000000..b525aa83f8e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java @@ -0,0 +1,209 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
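Unlike the deleted OzoneRestClient, which implemented the old OzoneClient interface itself, the new RestClient below is a ClientProtocol stub whose operations all throw UnsupportedOperationException for now; it is selected, like RpcClient, through the ozone.client.protocol property mentioned in the ClientProtocol javadoc. A hedged sketch of wiring that up; exactly how OzoneClientFactory reads the property is not shown in this hunk, so treat the setClass call as an assumption:

    // Assumption: the factory resolves ozone.client.protocol to a ClientProtocol class.
    OzoneClient restBackedClient(OzoneConfiguration conf) throws IOException {
      conf.setClass("ozone.client.protocol", RestClient.class, ClientProtocol.class);
      OzoneClientFactory.setConfiguration(conf);
      return OzoneClientFactory.getClient();
    }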
+ */ + +package org.apache.hadoop.ozone.client.rest; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.client.OzoneQuota; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; + +/** + * Ozone Client REST protocol implementation. It uses REST protocol to + * connect to Ozone Handler that executes client calls + */ +public class RestClient implements ClientProtocol { + + /** + * Creates RestClient instance with the given configuration. + * @param conf Configuration + * @throws IOException + */ + public RestClient(Configuration conf) + throws IOException { + Preconditions.checkNotNull(conf); + } + + @Override + public void createVolume(String volumeName) throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public void createVolume( + String volumeName, org.apache.hadoop.ozone.client.VolumeArgs args) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public void setVolumeOwner(String volumeName, String owner) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public void setVolumeQuota(String volumeName, OzoneQuota quota) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public OzoneVolume getVolumeDetails(String volumeName) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public boolean checkVolumeAccess(String volumeName, OzoneAcl acl) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public void deleteVolume(String volumeName) throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public Iterator listVolumes(String volumePrefix) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public Iterator listVolumes(String volumePrefix, String user) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public void createBucket(String volumeName, String bucketName) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public void createBucket( + String volumeName, String bucketName, BucketArgs bucketArgs) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public void addBucketAcls( + String volumeName, String bucketName, List addAcls) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public void removeBucketAcls( + String volumeName, String bucketName, List removeAcls) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public void setBucketVersioning( + String volumeName, String bucketName, 
Boolean versioning) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public void setBucketStorageType( + String volumeName, String bucketName, StorageType storageType) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public void deleteBucket(String volumeName, String bucketName) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public void checkBucketAccess(String volumeName, String bucketName) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public OzoneBucket getBucketDetails(String volumeName, String bucketName) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public Iterator listBuckets( + String volumeName, String bucketPrefix) throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public OzoneOutputStream createKey( + String volumeName, String bucketName, String keyName, long size) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public OzoneInputStream getKey( + String volumeName, String bucketName, String keyName) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public void deleteKey(String volumeName, String bucketName, String keyName) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public Iterator listKeys( + String volumeName, String bucketName, String keyPrefix) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public OzoneKey getKeyDetails( + String volumeName, String bucketName, String keyName) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented."); + } + + @Override + public void close() throws IOException { + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java index 5221a0e7d85..7e7995b2ced 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java @@ -1,19 +1,19 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.ozone.client.rest.headers; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java index 54157f0c024..340709f492d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,19 +7,15 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ -/** - * Ozone HTTP header definitions. - */ @InterfaceAudience.Private package org.apache.hadoop.ozone.client.rest.headers; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java similarity index 60% rename from hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClient.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 13cb370746f..6464c5deba2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1,18 +1,19 @@ /** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.ozone.client.rpc; @@ -24,7 +25,19 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.Client; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.client.OzoneQuota; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.VolumeArgs; +import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream; +import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream; import org.apache.hadoop.ozone.client.io.LengthInputStream; +import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs; import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; @@ -36,20 +49,9 @@ import org.apache.hadoop.ozone.ksm.protocolPB .KeySpaceManagerProtocolPB; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientUtils; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.ksm.KSMConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts.Versioning; import org.apache.hadoop.ozone.protocolPB.KSMPBHelper; -import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream; import org.apache.hadoop.scm.ScmConfigKeys; import org.apache.hadoop.scm.XceiverClientManager; import org.apache.hadoop.scm.protocolPB @@ -57,11 +59,9 @@ import org.apache.hadoop.scm.protocolPB import org.apache.hadoop.scm.protocolPB .StorageContainerLocationProtocolPB; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import 
java.io.Closeable; import java.io.IOException; import java.net.InetSocketAddress; import java.util.ArrayList; @@ -76,10 +76,10 @@ import java.util.stream.Collectors; * to execute client calls. This uses RPC protocol for communication * with the servers. */ -public class OzoneRpcClient implements OzoneClient, Closeable { +public class RpcClient implements ClientProtocol { private static final Logger LOG = - LoggerFactory.getLogger(OzoneRpcClient.class); + LoggerFactory.getLogger(RpcClient.class); private final StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; @@ -93,23 +93,12 @@ public class OzoneRpcClient implements OzoneClient, Closeable { private final OzoneAcl.OzoneACLRights userRights; private final OzoneAcl.OzoneACLRights groupRights; - /** - * Creates OzoneRpcClient instance with new OzoneConfiguration. - * - * @throws IOException - */ - public OzoneRpcClient() throws IOException { - this(new OzoneConfiguration()); - } - /** - * Creates OzoneRpcClient instance with the given configuration. - * + * Creates RpcClient instance with the given configuration. * @param conf - * * @throws IOException */ - public OzoneRpcClient(Configuration conf) throws IOException { + public RpcClient(Configuration conf) throws IOException { Preconditions.checkNotNull(conf); this.ugi = UserGroupInformation.getCurrentUser(); this.userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS, @@ -159,66 +148,44 @@ public class OzoneRpcClient implements OzoneClient, Closeable { } @Override - public void createVolume(String volumeName) - throws IOException { - createVolume(volumeName, ugi.getUserName()); + public void createVolume(String volumeName) throws IOException { + createVolume(volumeName, VolumeArgs.newBuilder().build()); } @Override - public void createVolume(String volumeName, String owner) - throws IOException { - - createVolume(volumeName, owner, OzoneConsts.MAX_QUOTA_IN_BYTES, - (OzoneAcl[])null); - } - - @Override - public void createVolume(String volumeName, String owner, - OzoneAcl... acls) - throws IOException { - createVolume(volumeName, owner, OzoneConsts.MAX_QUOTA_IN_BYTES, acls); - } - - @Override - public void createVolume(String volumeName, String owner, - long quota) - throws IOException { - createVolume(volumeName, owner, quota, (OzoneAcl[])null); - } - - @Override - public void createVolume(String volumeName, String owner, - long quota, OzoneAcl... acls) + public void createVolume(String volumeName, VolumeArgs volArgs) throws IOException { Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(owner); - Preconditions.checkNotNull(quota); - Preconditions.checkState(quota >= 0); - OzoneAcl userAcl = - new OzoneAcl(OzoneAcl.OzoneACLType.USER, - owner, userRights); - KsmVolumeArgs.Builder builder = KsmVolumeArgs.newBuilder(); - builder.setAdminName(ugi.getUserName()) - .setOwnerName(owner) - .setVolume(volumeName) - .setQuotaInBytes(quota) - .setCreationTime(Time.now()) - .addOzoneAcls(KSMPBHelper.convertOzoneAcl(userAcl)); + Preconditions.checkNotNull(volArgs); + String admin = volArgs.getAdmin() == null ? + ugi.getUserName() : volArgs.getAdmin(); + String owner = volArgs.getOwner() == null ? + ugi.getUserName() : volArgs.getOwner(); + long quota = volArgs.getQuota() == null ? 
+ OzoneConsts.MAX_QUOTA_IN_BYTES : + OzoneQuota.parseQuota(volArgs.getQuota()).sizeInBytes(); List listOfAcls = new ArrayList<>(); - + //User ACL + listOfAcls.add(new OzoneAcl(OzoneAcl.OzoneACLType.USER, + owner, userRights)); //Group ACLs of the User List userGroups = Arrays.asList(UserGroupInformation .createRemoteUser(owner).getGroupNames()); userGroups.stream().forEach((group) -> listOfAcls.add( new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, group, groupRights))); - - //ACLs passed as argument - if(acls != null) { - listOfAcls.addAll(Arrays.asList(acls)); + //ACLs from VolumeArgs + if(volArgs.getAcls() != null) { + listOfAcls.addAll(volArgs.getAcls()); } - //Remove duplicates and set + KsmVolumeArgs.Builder builder = KsmVolumeArgs.newBuilder(); + builder.setVolume(volumeName); + builder.setAdminName(admin); + builder.setOwnerName(owner); + builder.setQuotaInBytes(quota); + + //Remove duplicates and add ACLs for (OzoneAcl ozoneAcl : listOfAcls.stream().distinct().collect(Collectors.toList())) { builder.addOzoneAcls(KSMPBHelper.convertOzoneAcl(ozoneAcl)); @@ -238,34 +205,33 @@ public class OzoneRpcClient implements OzoneClient, Closeable { } @Override - public void setVolumeQuota(String volumeName, long quota) + public void setVolumeQuota(String volumeName, OzoneQuota quota) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(quota); - Preconditions.checkState(quota >= 0); - keySpaceManagerClient.setQuota(volumeName, quota); + long quotaInBytes = quota.sizeInBytes(); + keySpaceManagerClient.setQuota(volumeName, quotaInBytes); } @Override public OzoneVolume getVolumeDetails(String volumeName) throws IOException { Preconditions.checkNotNull(volumeName); - KsmVolumeArgs volumeArgs = - keySpaceManagerClient.getVolumeInfo(volumeName); - return new OzoneVolume(volumeArgs); + KsmVolumeArgs volume = keySpaceManagerClient.getVolumeInfo(volumeName); + return new OzoneVolume(volume.getVolume(), volume.getAdminName(), + volume.getOwnerName(), volume.getQuotaInBytes(), + volume.getAclMap().ozoneAclGetProtobuf().stream(). 
+ map(KSMPBHelper::convertOzoneAcl).collect(Collectors.toList())); } @Override public boolean checkVolumeAccess(String volumeName, OzoneAcl acl) throws IOException { - Preconditions.checkNotNull(volumeName); - return keySpaceManagerClient.checkVolumeAccess(volumeName, - KSMPBHelper.convertOzoneAcl(acl)); + throw new UnsupportedOperationException("Not yet implemented."); } @Override - public void deleteVolume(String volumeName) - throws IOException { + public void deleteVolume(String volumeName) throws IOException { Preconditions.checkNotNull(volumeName); keySpaceManagerClient.deleteVolume(volumeName); } @@ -277,8 +243,7 @@ public class OzoneRpcClient implements OzoneClient, Closeable { } @Override - public Iterator listVolumes(String volumePrefix, - String user) + public Iterator listVolumes(String volumePrefix, String user) throws IOException { throw new UnsupportedOperationException("Not yet implemented."); } @@ -286,104 +251,51 @@ public class OzoneRpcClient implements OzoneClient, Closeable { @Override public void createBucket(String volumeName, String bucketName) throws IOException { - createBucket(volumeName, bucketName, Versioning.NOT_DEFINED, - StorageType.DEFAULT, (OzoneAcl[])null); + createBucket(volumeName, bucketName, BucketArgs.newBuilder().build()); } @Override - public void createBucket(String volumeName, String bucketName, - Versioning versioning) - throws IOException { - createBucket(volumeName, bucketName, versioning, - StorageType.DEFAULT, (OzoneAcl[])null); - } - - @Override - public void createBucket(String volumeName, String bucketName, - StorageType storageType) - throws IOException { - createBucket(volumeName, bucketName, Versioning.NOT_DEFINED, - storageType, (OzoneAcl[])null); - } - - @Override - public void createBucket(String volumeName, String bucketName, - OzoneAcl... acls) - throws IOException { - createBucket(volumeName, bucketName, Versioning.NOT_DEFINED, - StorageType.DEFAULT, acls); - } - - @Override - public void createBucket(String volumeName, String bucketName, - Versioning versioning, StorageType storageType, - OzoneAcl... acls) + public void createBucket( + String volumeName, String bucketName, BucketArgs bucketArgs) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); - Preconditions.checkNotNull(versioning); - Preconditions.checkNotNull(storageType); + Preconditions.checkNotNull(bucketArgs); + + Boolean isVersionEnabled = bucketArgs.isVersionEnabled() == null ? + false : bucketArgs.isVersionEnabled(); + StorageType storageType = bucketArgs.getStorageType() == null ? 
+ StorageType.DEFAULT : bucketArgs.getStorageType(); + List listOfAcls = new ArrayList<>(); + //User ACL + listOfAcls.add(new OzoneAcl(OzoneAcl.OzoneACLType.USER, + ugi.getUserName(), userRights)); + //Group ACLs of the User + List userGroups = Arrays.asList(UserGroupInformation + .createRemoteUser(ugi.getUserName()).getGroupNames()); + userGroups.stream().forEach((group) -> listOfAcls.add( + new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, group, groupRights))); + //ACLs from BucketArgs + if(bucketArgs.getAcls() != null) { + listOfAcls.addAll(bucketArgs.getAcls()); + } KsmBucketInfo.Builder builder = KsmBucketInfo.newBuilder(); builder.setVolumeName(volumeName) .setBucketName(bucketName) + .setIsVersionEnabled(isVersionEnabled) .setStorageType(storageType) - .setIsVersionEnabled(getBucketVersioningProtobuf( - versioning)); + .setAcls(listOfAcls.stream().distinct().collect(Collectors.toList())); - String owner = ugi.getUserName(); - final List listOfAcls = new ArrayList<>(); - - //User ACL - OzoneAcl userAcl = - new OzoneAcl(OzoneAcl.OzoneACLType.USER, - owner, userRights); - listOfAcls.add(userAcl); - - //Group ACLs of the User - List userGroups = Arrays.asList(UserGroupInformation - .createRemoteUser(owner).getGroupNames()); - userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, group, groupRights))); - - //ACLs passed as argument - if(acls != null) { - Arrays.stream(acls).forEach((acl) -> listOfAcls.add(acl)); - } - - //Remove duplicates and set - builder.setAcls(listOfAcls.stream().distinct() - .collect(Collectors.toList())); LOG.info("Creating Bucket: {}/{}, with Versioning {} and " + - "Storage Type set to {}", volumeName, bucketName, versioning, - storageType); + "Storage Type set to {}", volumeName, bucketName, isVersionEnabled, + storageType); keySpaceManagerClient.createBucket(builder.build()); } - /** - * Converts OzoneConts.Versioning enum to boolean. 
- * - * @param version - * @return corresponding boolean value - */ - private boolean getBucketVersioningProtobuf( - Versioning version) { - if(version != null) { - switch(version) { - case ENABLED: - return true; - case NOT_DEFINED: - case DISABLED: - default: - return false; - } - } - return false; - } - @Override - public void addBucketAcls(String volumeName, String bucketName, - List addAcls) + public void addBucketAcls( + String volumeName, String bucketName, List addAcls) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); @@ -396,8 +308,8 @@ public class OzoneRpcClient implements OzoneClient, Closeable { } @Override - public void removeBucketAcls(String volumeName, String bucketName, - List removeAcls) + public void removeBucketAcls( + String volumeName, String bucketName, List removeAcls) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); @@ -410,8 +322,8 @@ public class OzoneRpcClient implements OzoneClient, Closeable { } @Override - public void setBucketVersioning(String volumeName, String bucketName, - Versioning versioning) + public void setBucketVersioning( + String volumeName, String bucketName, Boolean versioning) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); @@ -419,14 +331,13 @@ public class OzoneRpcClient implements OzoneClient, Closeable { KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder(); builder.setVolumeName(volumeName) .setBucketName(bucketName) - .setIsVersionEnabled(getBucketVersioningFlag( - versioning)); + .setIsVersionEnabled(versioning); keySpaceManagerClient.setBucketProperty(builder.build()); } @Override - public void setBucketStorageType(String volumeName, String bucketName, - StorageType storageType) + public void setBucketStorageType( + String volumeName, String bucketName, StorageType storageType) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); @@ -439,40 +350,42 @@ public class OzoneRpcClient implements OzoneClient, Closeable { } @Override - public void deleteBucket(String volumeName, String bucketName) - throws IOException { + public void deleteBucket( + String volumeName, String bucketName) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); keySpaceManagerClient.deleteBucket(volumeName, bucketName); } @Override - public void checkBucketAccess(String volumeName, String bucketName) - throws IOException { - throw new UnsupportedOperationException("Not yet implemented."); + public void checkBucketAccess( + String volumeName, String bucketName) throws IOException { + } @Override - public OzoneBucket getBucketDetails(String volumeName, - String bucketName) - throws IOException { + public OzoneBucket getBucketDetails( + String volumeName, String bucketName) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); - KsmBucketInfo bucketInfo = + KsmBucketInfo bucketArgs = keySpaceManagerClient.getBucketInfo(volumeName, bucketName); - return new OzoneBucket(bucketInfo); + return new OzoneBucket(bucketArgs.getVolumeName(), + bucketArgs.getBucketName(), + bucketArgs.getAcls(), + bucketArgs.getStorageType(), + bucketArgs.getIsVersionEnabled()); } @Override - public Iterator listBuckets(String volumeName, - String bucketPrefix) - throws IOException { + public Iterator listBuckets( + String volumeName, String bucketPrefix) throws IOException { throw new 
UnsupportedOperationException("Not yet implemented."); } @Override - public OzoneOutputStream createKey(String volumeName, String bucketName, - String keyName, long size) + public OzoneOutputStream createKey( + String volumeName, String bucketName, String keyName, long size) throws IOException { String requestId = UUID.randomUUID().toString(); KsmKeyArgs keyArgs = new KsmKeyArgs.Builder() @@ -483,15 +396,15 @@ public class OzoneRpcClient implements OzoneClient, Closeable { .build(); KsmKeyInfo keyInfo = keySpaceManagerClient.allocateKey(keyArgs); - ChunkGroupOutputStream groupOutputStream = + ChunkGroupOutputStream groupOutputStream = ChunkGroupOutputStream.getFromKsmKeyInfo(keyInfo, xceiverClientManager, - storageContainerLocationClient, chunkSize, requestId); + storageContainerLocationClient, chunkSize, requestId); return new OzoneOutputStream(groupOutputStream); } @Override - public OzoneInputStream getKey(String volumeName, String bucketName, - String keyName) + public OzoneInputStream getKey( + String volumeName, String bucketName, String keyName) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); @@ -505,15 +418,15 @@ public class OzoneRpcClient implements OzoneClient, Closeable { KsmKeyInfo keyInfo = keySpaceManagerClient.lookupKey(keyArgs); LengthInputStream lengthInputStream = ChunkGroupInputStream.getFromKsmKeyInfo( - keyInfo, xceiverClientManager, storageContainerLocationClient, - requestId); + keyInfo, xceiverClientManager, storageContainerLocationClient, + requestId); return new OzoneInputStream( (ChunkGroupInputStream)lengthInputStream.getWrappedStream()); } @Override - public void deleteKey(String volumeName, String bucketName, - String keyName) + public void deleteKey( + String volumeName, String bucketName, String keyName) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); @@ -527,15 +440,15 @@ public class OzoneRpcClient implements OzoneClient, Closeable { } @Override - public List listKeys(String volumeName, String bucketName, - String keyPrefix) + public Iterator listKeys( + String volumeName, String bucketName, String keyPrefix) throws IOException { throw new UnsupportedOperationException("Not yet implemented."); } @Override - public OzoneKey getKeyDetails(String volumeName, String bucketName, - String keyName) + public OzoneKey getKeyDetails( + String volumeName, String bucketName, String keyName) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); @@ -545,30 +458,11 @@ public class OzoneRpcClient implements OzoneClient, Closeable { .setBucketName(bucketName) .setKeyName(keyName) .build(); - KsmKeyInfo keyInfo = - keySpaceManagerClient.lookupKey(keyArgs); - return new OzoneKey(keyInfo); - } - - /** - * Converts Versioning to boolean. 
- * - * @param version - * @return corresponding boolean value - */ - private boolean getBucketVersioningFlag( - Versioning version) { - if(version != null) { - switch(version) { - case ENABLED: - return true; - case DISABLED: - case NOT_DEFINED: - default: - return false; - } - } - return false; + KsmKeyInfo keyInfo = keySpaceManagerClient.lookupKey(keyArgs); + return new OzoneKey(keyInfo.getVolumeName(), + keyInfo.getBucketName(), + keyInfo.getKeyName(), + keyInfo.getDataSize()); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/tools/Corona.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/tools/Corona.java index be8a191b968..18785a07e00 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/tools/Corona.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/tools/Corona.java @@ -28,9 +28,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.OzoneConfiguration; +import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.Tool; @@ -123,6 +126,7 @@ public final class Corona extends Configured implements Tool { private boolean validateWrites; private OzoneClient ozoneClient; + private ObjectStore objectStore; private ExecutorService processor; private long startTime; @@ -157,6 +161,7 @@ public final class Corona extends Configured implements Tool { numberOfKeysAdded = new AtomicLong(); OzoneClientFactory.setConfiguration(conf); ozoneClient = OzoneClientFactory.getClient(); + objectStore = ozoneClient.getObjectStore(); } @Override @@ -208,6 +213,7 @@ public final class Corona extends Configured implements Tool { if(validateWrites) { validator.join(); } + ozoneClient.close(); return 0; } @@ -334,30 +340,32 @@ public final class Corona extends Configured implements Tool { private int totalBuckets; private int totalKeys; - private String volume; + private OzoneVolume volume; - OfflineProcessor(String volume) throws Exception { + OfflineProcessor(String volumeName) throws Exception { this.totalBuckets = Integer.parseInt(numOfBuckets); this.totalKeys = Integer.parseInt(numOfKeys); - this.volume = volume; - LOG.trace("Creating volume: {}", volume); + LOG.trace("Creating volume: {}", volumeName); long start = System.nanoTime(); - ozoneClient.createVolume(this.volume); + objectStore.createVolume(volumeName); volumeCreationTime.getAndAdd(System.nanoTime() - start); numberOfVolumesCreated.getAndIncrement(); + volume = objectStore.getVolume(volumeName); } @Override public void run() { for (int j = 0; j < totalBuckets; j++) { - String bucket = "bucket-" + j + "-" + + String bucketName = "bucket-" + j + "-" + RandomStringUtils.randomNumeric(5); try { - LOG.trace("Creating bucket: {} in volume: {}", bucket, volume); + LOG.trace("Creating bucket: {} in volume: {}", + bucketName, volume.getName()); long start = System.nanoTime(); - ozoneClient.createBucket(volume, bucket); + volume.createBucket(bucketName); 
bucketCreationTime.getAndAdd(System.nanoTime() - start); numberOfBucketsCreated.getAndIncrement(); + OzoneBucket bucket = volume.getBucket(bucketName); for (int k = 0; k < totalKeys; k++) { String key = "key-" + k + "-" + RandomStringUtils.randomNumeric(5); @@ -367,8 +375,7 @@ public final class Corona extends Configured implements Tool { LOG.trace("Adding key: {} in bucket: {} of volume: {}", key, bucket, volume); long keyCreateStart = System.nanoTime(); - OzoneOutputStream os = ozoneClient.createKey( - volume, bucket, key, value.length); + OzoneOutputStream os = bucket.createKey(key, value.length); keyCreationTime.getAndAdd(System.nanoTime() - keyCreateStart); long keyWriteStart = System.nanoTime(); os.write(value); @@ -378,7 +385,7 @@ public final class Corona extends Configured implements Tool { numberOfKeysAdded.getAndIncrement(); if(validateWrites) { boolean validate = validationQueue.offer( - new KeyValue(volume, bucket, key, value)); + new KeyValue(bucket, key, value)); if(validate) { LOG.trace("Key {}, is queued for validation.", key); } @@ -392,7 +399,7 @@ public final class Corona extends Configured implements Tool { } catch (Exception e) { exception = true; LOG.error("Exception while creating bucket: {}" + - " in volume: {}.", bucket, volume, e); + " in volume: {}.", bucketName, volume, e); } } } @@ -660,8 +667,8 @@ public final class Corona extends Configured implements Tool { try { KeyValue kv = validationQueue.poll(5, TimeUnit.SECONDS); if(kv != null) { - OzoneInputStream is = ozoneClient. - getKey(kv.volume, kv.bucket, kv.key); + + OzoneInputStream is = kv.bucket.readKey(kv.key); byte[] value = new byte[kv.value.length]; int length = is.read(value); totalWritesValidated++; @@ -670,7 +677,7 @@ public final class Corona extends Configured implements Tool { } else { writeValidationFailureCount++; LOG.warn("Data validation error for key {}/{}/{}", - kv.volume, kv.bucket, kv.key); + kv.bucket.getVolumeName(), kv.bucket, kv.key); LOG.warn("Expected: {}, Actual: {}", DFSUtil.bytes2String(kv.value), DFSUtil.bytes2String(value)); @@ -683,22 +690,15 @@ public final class Corona extends Configured implements Tool { } } - - /** * Wrapper to hold ozone key-value pair. */ private static class KeyValue { - /** - * Volume name associated with the key-value. - */ - private String volume; - /** * Bucket name associated with the key-value. */ - private String bucket; + private OzoneBucket bucket; /** * Key name associated with the key-value. */ @@ -714,9 +714,7 @@ public final class Corona extends Configured implements Tool { * @param key key part * @param value value part */ - KeyValue( - String volume, String bucket, String key, byte[] value) { - this.volume = volume; + KeyValue(OzoneBucket bucket, String key, byte[] value) { this.bucket = bucket; this.key = key; this.value = value; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml index 7a504b3012b..492ab06c109 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml @@ -85,6 +85,17 @@ + + ozone.client.protocol + org.apache.hadoop.ozone.client.rpc.RpcClient + + Protocol class to be used by client to connect to ozone cluster. 
+ The built-in implementations include: + org.apache.hadoop.ozone.client.rpc.RpcClient for RPC + org.apache.hadoop.ozone.client.rest.RestClient for REST + + + ozone.client.socket.timeout.ms 5000 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java index 501475bcdd6..385f9f928e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java @@ -24,11 +24,15 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.client.OzoneQuota; import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.web.exceptions.OzoneException; @@ -54,6 +58,7 @@ public class TestOzoneRpcClient { private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; + private static ObjectStore store = null; /** * Create a MiniDFSCluster for testing. @@ -70,48 +75,54 @@ OzoneConsts.OZONE_HANDLER_DISTRIBUTED); cluster = new MiniOzoneCluster.Builder(conf) .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build(); + conf.set("ozone.client.protocol", + "org.apache.hadoop.ozone.client.rpc.RpcClient"); OzoneClientFactory.setConfiguration(conf); - ozClient = OzoneClientFactory.getRpcClient(); + ozClient = OzoneClientFactory.getClient(); + store = ozClient.getObjectStore(); } @Test public void testCreateVolume() throws IOException, OzoneException { String volumeName = UUID.randomUUID().toString(); - ozClient.createVolume(volumeName); - OzoneVolume volume = ozClient.getVolumeDetails(volumeName); - Assert.assertEquals(volumeName, volume.getVolumeName()); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + Assert.assertEquals(volumeName, volume.getName()); } @Test public void testCreateVolumeWithOwner() throws IOException, OzoneException { String volumeName = UUID.randomUUID().toString(); - ozClient.createVolume(volumeName, "test"); - OzoneVolume volume = ozClient.getVolumeDetails(volumeName); - Assert.assertEquals(volumeName, volume.getVolumeName()); - Assert.assertEquals("test", volume.getOwnerName()); + VolumeArgs.Builder argsBuilder = VolumeArgs.newBuilder(); + argsBuilder.setOwner("test"); + store.createVolume(volumeName, argsBuilder.build()); + OzoneVolume volume = store.getVolume(volumeName); + Assert.assertEquals(volumeName, volume.getName()); + Assert.assertEquals("test", volume.getOwner()); } @Test public void testCreateVolumeWithQuota() throws IOException, OzoneException { String volumeName = UUID.randomUUID().toString(); - ozClient.createVolume(volumeName, "test", - 10000000000L); - OzoneVolume volume = 
ozClient.getVolumeDetails(volumeName); - Assert.assertEquals(volumeName, volume.getVolumeName()); - Assert.assertEquals("test", volume.getOwnerName()); - Assert.assertEquals(10000000000L, volume.getQuota()); + VolumeArgs.Builder argsBuilder = VolumeArgs.newBuilder(); + argsBuilder.setOwner("test").setQuota("1000000000 BYTES"); + store.createVolume(volumeName, argsBuilder.build()); + OzoneVolume volume = store.getVolume(volumeName); + Assert.assertEquals(volumeName, volume.getName()); + Assert.assertEquals("test", volume.getOwner()); + Assert.assertEquals(1000000000L, volume.getQuota()); } @Test public void testVolumeAlreadyExist() throws IOException, OzoneException { String volumeName = UUID.randomUUID().toString(); - ozClient.createVolume(volumeName); + store.createVolume(volumeName); try { - ozClient.createVolume(volumeName); + store.createVolume(volumeName); } catch (IOException ex) { Assert.assertEquals( "Volume creation failed, error:VOLUME_ALREADY_EXISTS", @@ -123,20 +134,21 @@ public class TestOzoneRpcClient { public void testSetVolumeOwner() throws IOException, OzoneException { String volumeName = UUID.randomUUID().toString(); - ozClient.createVolume(volumeName); - ozClient.setVolumeOwner(volumeName, "test"); - OzoneVolume volume = ozClient.getVolumeDetails(volumeName); - Assert.assertEquals("test", volume.getOwnerName()); + store.createVolume(volumeName); + store.getVolume(volumeName).setOwner("test"); + OzoneVolume volume = store.getVolume(volumeName); + Assert.assertEquals("test", volume.getOwner()); } @Test public void testSetVolumeQuota() throws IOException, OzoneException { String volumeName = UUID.randomUUID().toString(); - ozClient.createVolume(volumeName); - ozClient.setVolumeQuota(volumeName, 10000000000L); - OzoneVolume volume = ozClient.getVolumeDetails(volumeName); - Assert.assertEquals(10000000000L, volume.getQuota()); + store.createVolume(volumeName); + store.getVolume(volumeName).setQuota( + OzoneQuota.parseQuota("100000000 BYTES")); + OzoneVolume volume = store.getVolume(volumeName); + Assert.assertEquals(100000000L, volume.getQuota()); } @Test @@ -144,11 +156,11 @@ public class TestOzoneRpcClient { throws IOException, OzoneException { thrown.expectMessage("Info Volume failed, error"); String volumeName = UUID.randomUUID().toString(); - ozClient.createVolume(volumeName); - OzoneVolume volume = ozClient.getVolumeDetails(volumeName); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); Assert.assertNotNull(volume); - ozClient.deleteVolume(volumeName); - ozClient.getVolumeDetails(volumeName); + store.deleteVolume(volumeName); + store.getVolume(volumeName); } @Test @@ -156,10 +168,11 @@ public class TestOzoneRpcClient { throws IOException, OzoneException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - ozClient.createVolume(volumeName); - ozClient.createBucket(volumeName, bucketName); - OzoneBucket bucket = ozClient.getBucketDetails(volumeName, bucketName); - Assert.assertEquals(bucketName, bucket.getBucketName()); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + Assert.assertEquals(bucketName, bucket.getName()); } @Test @@ -167,13 +180,14 @@ public class TestOzoneRpcClient { throws IOException, OzoneException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - ozClient.createVolume(volumeName); - 
ozClient.createBucket(volumeName, bucketName, - OzoneConsts.Versioning.ENABLED); - OzoneBucket bucket = ozClient.getBucketDetails(volumeName, bucketName); - Assert.assertEquals(bucketName, bucket.getBucketName()); - Assert.assertEquals(OzoneConsts.Versioning.ENABLED, - bucket.getVersioning()); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + BucketArgs.Builder builder = BucketArgs.newBuilder(); + builder.setIsVersionEnabled(true); + volume.createBucket(bucketName, builder.build()); + OzoneBucket bucket = volume.getBucket(bucketName); + Assert.assertEquals(bucketName, bucket.getName()); + Assert.assertEquals(true, bucket.getVersioning()); } @Test @@ -181,10 +195,13 @@ public class TestOzoneRpcClient { throws IOException, OzoneException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - ozClient.createVolume(volumeName); - ozClient.createBucket(volumeName, bucketName, StorageType.SSD); - OzoneBucket bucket = ozClient.getBucketDetails(volumeName, bucketName); - Assert.assertEquals(bucketName, bucket.getBucketName()); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + BucketArgs.Builder builder = BucketArgs.newBuilder(); + builder.setStorageType(StorageType.SSD); + volume.createBucket(bucketName, builder.build()); + OzoneBucket bucket = volume.getBucket(bucketName); + Assert.assertEquals(bucketName, bucket.getName()); Assert.assertEquals(StorageType.SSD, bucket.getStorageType()); } @@ -195,10 +212,15 @@ public class TestOzoneRpcClient { String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, "test", OzoneAcl.OzoneACLRights.READ_WRITE); - ozClient.createVolume(volumeName); - ozClient.createBucket(volumeName, bucketName, userAcl); - OzoneBucket bucket = ozClient.getBucketDetails(volumeName, bucketName); - Assert.assertEquals(bucketName, bucket.getBucketName()); + List acls = new ArrayList<>(); + acls.add(userAcl); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + BucketArgs.Builder builder = BucketArgs.newBuilder(); + builder.setAcls(acls); + volume.createBucket(bucketName, builder.build()); + OzoneBucket bucket = volume.getBucket(bucketName); + Assert.assertEquals(bucketName, bucket.getName()); Assert.assertTrue(bucket.getAcls().contains(userAcl)); } @@ -209,46 +231,38 @@ public class TestOzoneRpcClient { String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, "test", OzoneAcl.OzoneACLRights.READ_WRITE); - ozClient.createVolume(volumeName); - ozClient.createBucket(volumeName, bucketName, - OzoneConsts.Versioning.ENABLED, - StorageType.SSD, userAcl); - OzoneBucket bucket = ozClient.getBucketDetails(volumeName, bucketName); - Assert.assertEquals(bucketName, bucket.getBucketName()); - Assert.assertEquals(OzoneConsts.Versioning.ENABLED, - bucket.getVersioning()); + List acls = new ArrayList<>(); + acls.add(userAcl); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + BucketArgs.Builder builder = BucketArgs.newBuilder(); + builder.setIsVersionEnabled(true) + .setStorageType(StorageType.SSD) + .setAcls(acls); + volume.createBucket(bucketName, builder.build()); + OzoneBucket bucket = volume.getBucket(bucketName); + Assert.assertEquals(bucketName, bucket.getName()); + Assert.assertEquals(true, bucket.getVersioning()); Assert.assertEquals(StorageType.SSD, bucket.getStorageType()); 
Assert.assertTrue(bucket.getAcls().contains(userAcl)); } - @Test - public void testCreateBucketInInvalidVolume() - throws IOException, OzoneException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - try { - ozClient.createBucket(volumeName, bucketName); - } catch (IOException ex) { - Assert.assertEquals( - "Bucket creation failed, error: VOLUME_NOT_FOUND", - ex.getMessage()); - } - } - @Test public void testAddBucketAcl() throws IOException, OzoneException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - ozClient.createVolume(volumeName); - ozClient.createBucket(volumeName, bucketName); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); List acls = new ArrayList<>(); acls.add(new OzoneAcl( OzoneAcl.OzoneACLType.USER, "test", OzoneAcl.OzoneACLRights.READ_WRITE)); - ozClient.addBucketAcls(volumeName, bucketName, acls); - OzoneBucket bucket = ozClient.getBucketDetails(volumeName, bucketName); - Assert.assertEquals(bucketName, bucket.getBucketName()); + OzoneBucket bucket = volume.getBucket(bucketName); + bucket.addAcls(acls); + OzoneBucket newBucket = volume.getBucket(bucketName); + Assert.assertEquals(bucketName, newBucket.getName()); Assert.assertTrue(bucket.getAcls().contains(acls.get(0))); } @@ -259,13 +273,17 @@ public class TestOzoneRpcClient { String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, "test", OzoneAcl.OzoneACLRights.READ_WRITE); - ozClient.createVolume(volumeName); - ozClient.createBucket(volumeName, bucketName, userAcl); List acls = new ArrayList<>(); acls.add(userAcl); - ozClient.removeBucketAcls(volumeName, bucketName, acls); - OzoneBucket bucket = ozClient.getBucketDetails(volumeName, bucketName); - Assert.assertEquals(bucketName, bucket.getBucketName()); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + BucketArgs.Builder builder = BucketArgs.newBuilder(); + builder.setAcls(acls); + volume.createBucket(bucketName, builder.build()); + OzoneBucket bucket = volume.getBucket(bucketName); + bucket.removeAcls(acls); + OzoneBucket newBucket = volume.getBucket(bucketName); + Assert.assertEquals(bucketName, newBucket.getName()); Assert.assertTrue(!bucket.getAcls().contains(acls.get(0))); } @@ -274,14 +292,14 @@ public class TestOzoneRpcClient { throws IOException, OzoneException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - ozClient.createVolume(volumeName); - ozClient.createBucket(volumeName, bucketName); - ozClient.setBucketVersioning(volumeName, bucketName, - OzoneConsts.Versioning.ENABLED); - OzoneBucket bucket = ozClient.getBucketDetails(volumeName, bucketName); - Assert.assertEquals(bucketName, bucket.getBucketName()); - Assert.assertEquals(OzoneConsts.Versioning.ENABLED, - bucket.getVersioning()); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + bucket.setVersioning(true); + OzoneBucket newBucket = volume.getBucket(bucketName); + Assert.assertEquals(bucketName, newBucket.getName()); + Assert.assertEquals(true, newBucket.getVersioning()); } @Test @@ -289,13 +307,14 @@ public class TestOzoneRpcClient { throws IOException, OzoneException { String volumeName = UUID.randomUUID().toString(); String bucketName = 
UUID.randomUUID().toString(); - ozClient.createVolume(volumeName); - ozClient.createBucket(volumeName, bucketName); - ozClient.setBucketStorageType(volumeName, bucketName, - StorageType.SSD); - OzoneBucket bucket = ozClient.getBucketDetails(volumeName, bucketName); - Assert.assertEquals(bucketName, bucket.getBucketName()); - Assert.assertEquals(StorageType.SSD, bucket.getStorageType()); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + bucket.setStorageType(StorageType.SSD); + OzoneBucket newBucket = volume.getBucket(bucketName); + Assert.assertEquals(bucketName, newBucket.getName()); + Assert.assertEquals(StorageType.SSD, newBucket.getStorageType()); } @@ -305,12 +324,13 @@ public class TestOzoneRpcClient { thrown.expectMessage("Info Bucket failed, error"); String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - ozClient.createVolume(volumeName); - ozClient.createBucket(volumeName, bucketName); - OzoneBucket bucket = ozClient.getBucketDetails(volumeName, bucketName); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); Assert.assertNotNull(bucket); - ozClient.deleteBucket(volumeName, bucketName); - ozClient.getBucketDetails(volumeName, bucketName); + volume.deleteBucket(bucketName); + volume.getBucket(bucketName); } @@ -321,19 +341,21 @@ public class TestOzoneRpcClient { String bucketName = UUID.randomUUID().toString(); String value = "sample value"; - ozClient.createVolume(volumeName); - ozClient.createBucket(volumeName, bucketName); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); for (int i = 0; i < 10; i++) { String keyName = UUID.randomUUID().toString(); - OzoneOutputStream out = ozClient.createKey(volumeName, bucketName, - keyName, value.getBytes().length); + OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes().length); out.write(value.getBytes()); out.close(); - OzoneKey key = ozClient.getKeyDetails(volumeName, bucketName, keyName); - Assert.assertEquals(keyName, key.getKeyName()); - OzoneInputStream is = ozClient.getKey(volumeName, bucketName, keyName); + OzoneKey key = bucket.getKey(keyName); + Assert.assertEquals(keyName, key.getName()); + OzoneInputStream is = bucket.readKey(keyName); byte[] fileContent = new byte[value.getBytes().length]; is.read(fileContent); Assert.assertEquals(value, new String(fileContent)); @@ -348,23 +370,28 @@ public class TestOzoneRpcClient { String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); String value = "sample value"; - ozClient.createVolume(volumeName); - ozClient.createBucket(volumeName, bucketName); - OzoneOutputStream out = ozClient.createKey(volumeName, bucketName, - keyName, value.getBytes().length); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes().length); out.write(value.getBytes()); out.close(); - OzoneKey key = ozClient.getKeyDetails(volumeName, bucketName, keyName); - Assert.assertEquals(keyName, key.getKeyName()); - ozClient.deleteKey(volumeName, bucketName, keyName); - 
ozClient.getKeyDetails(volumeName, bucketName, keyName); + OzoneKey key = bucket.getKey(keyName); + Assert.assertEquals(keyName, key.getName()); + bucket.deleteKey(keyName); + bucket.getKey(keyName); } /** - * Shutdown MiniDFSCluster. + * Close OzoneClient and shut down MiniDFSCluster. */ @AfterClass - public static void shutdown() { + public static void shutdown() throws IOException { + if(ozClient != null) { + ozClient.close(); + } if (cluster != null) { cluster.shutdown(); }
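
Note on the refactored API: the following is a minimal usage sketch of the client interface introduced by this patch, mirroring the calls exercised in TestOzoneRpcClient and Corona above (OzoneClientFactory, ObjectStore, OzoneVolume, OzoneBucket). It is illustrative only and not part of the patch; the volume, bucket, and key names are made up, error handling is omitted, and a reachable Ozone cluster configuration is assumed.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.VolumeArgs;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

public final class OzoneClientUsageSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Pick the protocol implementation; RestClient is the other built-in value.
    conf.set("ozone.client.protocol",
        "org.apache.hadoop.ozone.client.rpc.RpcClient");
    OzoneClientFactory.setConfiguration(conf);
    OzoneClient client = OzoneClientFactory.getClient();
    ObjectStore store = client.getObjectStore();

    // Volumes are created through ObjectStore, with builder-style arguments.
    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
        .setOwner("test")
        .setQuota("1000000000 BYTES")
        .build();
    store.createVolume("volume-a", volumeArgs);
    OzoneVolume volume = store.getVolume("volume-a");

    // Buckets are created from the volume handle; BucketArgs replaces the
    // old per-property createBucket overloads.
    BucketArgs bucketArgs = BucketArgs.newBuilder()
        .setIsVersionEnabled(true)
        .setStorageType(StorageType.SSD)
        .build();
    volume.createBucket("bucket-a", bucketArgs);
    OzoneBucket bucket = volume.getBucket("bucket-a");

    // Keys are written and read through the bucket handle.
    byte[] value = "sample value".getBytes();
    OzoneOutputStream out = bucket.createKey("key-a", value.length);
    out.write(value);
    out.close();
    OzoneInputStream in = bucket.readKey("key-a");
    byte[] read = new byte[value.length];
    in.read(read);
    in.close();

    bucket.deleteKey("key-a");
    client.close();
  }
}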
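
Bucket properties can likewise be changed after creation. This small sketch mirrors the setVersioning, setStorageType, and ACL calls used in the tests above, which in RpcClient translate to KeySpaceManager setBucketProperty operations; the class and method names are hypothetical, and the bucket name is assumed to exist.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneVolume;

public final class BucketPropertySketch {
  /** Updates mutable bucket properties through the new bucket handle. */
  static void updateBucket(OzoneVolume volume, String bucketName)
      throws IOException {
    OzoneBucket bucket = volume.getBucket(bucketName);

    // Versioning and storage type are now toggled on the bucket handle.
    bucket.setVersioning(true);
    bucket.setStorageType(StorageType.DISK);

    // ACLs are added and removed as lists of OzoneAcl.
    List<OzoneAcl> acls = new ArrayList<>();
    acls.add(new OzoneAcl(OzoneAcl.OzoneACLType.USER, "test",
        OzoneAcl.OzoneACLRights.READ_WRITE));
    bucket.addAcls(acls);
    bucket.removeAcls(acls);

    // Re-read the bucket to observe the persisted state.
    OzoneBucket refreshed = volume.getBucket(bucketName);
    assert refreshed.getVersioning();
  }
}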