From dc72782008b2c66970dc3dee47fe12e4850bfefe Mon Sep 17 00:00:00 2001
From: Anu Engineer
Date: Wed, 28 Aug 2019 09:53:31 -0700
Subject: [PATCH] HDDS-738. Removing REST protocol support from OzoneClient.
 Contributed by Elek, Marton.

---
 .../apache/hadoop/ozone/OzoneConfigKeys.java  |    3 -
 .../src/main/resources/ozone-default.xml      |   15 +-
 .../hadoop/ozone/client/OzoneBucket.java      |    2 +
 .../ozone/client/OzoneClientException.java    |   41 +-
 .../ozone/client/OzoneClientFactory.java      |  127 +-
 .../hadoop/ozone/client/OzoneClientUtils.java |   91 --
 .../rest/DefaultRestServerSelector.java       |   36 -
 .../client/rest/OzoneExceptionMapper.java     |   45 -
 .../hadoop/ozone/client/rest/RestClient.java  | 1130 -------------
 .../ozone/client/rest/RestServerSelector.java |   40 -
 .../client/rest/exceptions/package-info.java  |   22 -
 .../client/rest/headers/package-info.java     |   22 -
 .../ozone/client/rest/package-info.java       |   23 -
 .../ozone/client/rest/OzoneException.java     |  267 ---
 .../ozone/client/rest/headers/Header.java     |   74 -
 .../client/rest/headers/package-info.java     |   22 -
 .../ozone/client/rest/package-info.java       |   22 -
 .../client/rest/response/BucketInfo.java      |  248 ---
 .../ozone/client/rest/response/KeyInfo.java   |  236 ---
 .../client/rest/response/KeyInfoDetails.java  |  118 --
 .../client/rest/response/KeyLocation.java     |   89 -
 .../client/rest/response/VolumeInfo.java      |  215 ---
 .../client/rest/response/VolumeOwner.java     |   61 -
 .../client/rest/response/package-info.java    |   24 -
 .../hadoop/ozone/om/helpers/ServiceInfo.java  |   23 -
 .../hadoop/ozone/web/handlers/BucketArgs.java |  134 --
 .../hadoop/ozone/web/handlers/KeyArgs.java    |  117 --
 .../hadoop/ozone/web/handlers/ListArgs.java   |  142 --
 .../hadoop/ozone/web/handlers/UserArgs.java   |  172 --
 .../hadoop/ozone/web/handlers/VolumeArgs.java |  143 --
 .../ozone/web/handlers/package-info.java      |   22 -
 .../hadoop/ozone/web/request/OzoneQuota.java  |  214 ---
 .../ozone/web/request/package-info.java       |   23 -
 .../hadoop/ozone/web/response/BucketInfo.java |  335 ----
 .../hadoop/ozone/web/response/KeyInfo.java    |  311 ----
 .../ozone/web/response/KeyInfoDetails.java    |   80 -
 .../ozone/web/response/KeyLocation.java       |   82 -
 .../ozone/web/response/ListBuckets.java       |  154 --
 .../hadoop/ozone/web/response/ListKeys.java   |  209 ---
 .../ozone/web/response/ListVolumes.java       |  152 --
 .../hadoop/ozone/web/response/VolumeInfo.java |  308 ----
 .../ozone/web/response/VolumeOwner.java       |   60 -
 .../ozone/web/response/package-info.java      |   23 -
 .../hadoop/ozone/web/TestBucketInfo.java      |   82 -
 .../apache/hadoop/ozone/web/TestQuota.java    |  115 --
 .../apache/hadoop/ozone/web/TestUtils.java    |  101 --
 .../hadoop/ozone/web/TestVolumeStructs.java   |   73 -
 hadoop-ozone/datanode/pom.xml                 |    4 -
 .../dev-support/bin/dist-layout-stitching     |    4 -
 .../main/smoketest/basic/ozone-shell.robot    |   43 +-
 .../src/main/smoketest/ozonefs/ozonefs.robot  |   18 +-
 hadoop-ozone/integration-test/pom.xml         |    4 -
 .../apache/hadoop/ozone/MiniOzoneCluster.java |    9 -
 .../hadoop/ozone/MiniOzoneClusterImpl.java    |   16 -
 .../apache/hadoop/ozone/RatisTestHelper.java  |   26 +-
 .../org/apache/hadoop/ozone/TestDataUtil.java |   95 ++
 .../apache/hadoop/ozone/TestOzoneHelper.java  |  413 -----
 .../TestStorageContainerManagerHelper.java    |   80 +-
 .../rpc/TestOzoneRpcClientAbstract.java       |   42 +-
 .../commandhandler/TestBlockDeletion.java     |    3 +-
 .../TestCloseContainerHandler.java            |   22 +-
 .../om/TestMultipleContainerReadWrite.java    |  208 ---
 .../apache/hadoop/ozone/om/TestOmAcls.java    |   95 +-
 .../ozone/om/TestOmBlockVersioning.java       |   93 +-
 .../apache/hadoop/ozone/om/TestOmInit.java    |   18 +-
 .../hadoop/ozone/om/TestOzoneManager.java     | 1449 -----------------
 .../ozone/om/TestOzoneManagerRestart.java     |   28 +-
 .../ozone/ozShell/TestOzoneDatanodeShell.java |   28 +-
 .../hadoop/ozone/ozShell/TestOzoneShell.java  | 1266 --------------
 .../web/TestOzoneRestWithMiniCluster.java     |  221 ---
 .../hadoop/ozone/web/TestOzoneVolumes.java    |  187 ---
 .../hadoop/ozone/web/TestOzoneWebAccess.java  |  118 --
 .../hadoop/ozone/web/client/TestBuckets.java  |  349 ----
 .../ozone/web/client/TestBucketsRatis.java    |  108 --
 .../hadoop/ozone/web/client/TestKeys.java     |  734 ---------
 .../ozone/web/client/TestKeysRatis.java       |  126 --
 .../ozone/web/client/TestOzoneClient.java     |  304 ----
 .../hadoop/ozone/web/client/TestVolume.java   |  381 -----
 .../ozone/web/client/TestVolumeRatis.java     |  155 --
 .../hadoop/ozone/web/client/package-info.java |   22 -
 .../apache/hadoop/ozone/web/package-info.java |   22 -
 hadoop-ozone/objectstore-service/pom.xml      |  126 --
 .../server/datanode/ObjectStoreHandler.java   |  160 --
 .../hdfs/server/datanode/package-info.java    |   22 -
 .../apache/hadoop/ozone/OzoneRestUtils.java   |  222 ---
 .../org/apache/hadoop/ozone/package-info.java |   22 -
 .../ozone/web/ObjectStoreApplication.java     |   59 -
 .../ozone/web/OzoneHddsDatanodeService.java   |   89 -
 .../ozone/web/exceptions/ErrorTable.java      |  225 ---
 .../ozone/web/exceptions/package-info.java    |   22 -
 .../ozone/web/handlers/BucketHandler.java     |  190 ---
 .../web/handlers/BucketProcessTemplate.java   |  294 ----
 .../hadoop/ozone/web/handlers/KeyHandler.java |  302 ----
 .../web/handlers/KeyProcessTemplate.java      |  235 ---
 .../ozone/web/handlers/ServiceFilter.java     |   61 -
 .../web/handlers/StorageHandlerBuilder.java   |   80 -
 .../web/handlers/UserHandlerBuilder.java      |   75 -
 .../ozone/web/handlers/VolumeHandler.java     |  274 ----
 .../web/handlers/VolumeProcessTemplate.java   |  276 ----
 .../ozone/web/handlers/package-info.java      |   22 -
 .../ozone/web/interfaces/Accounting.java      |   57 -
 .../hadoop/ozone/web/interfaces/Bucket.java   |  184 ---
 .../hadoop/ozone/web/interfaces/Keys.java     |  177 --
 .../ozone/web/interfaces/StorageHandler.java  |  308 ----
 .../hadoop/ozone/web/interfaces/UserAuth.java |  101 --
 .../hadoop/ozone/web/interfaces/Volume.java   |  185 ---
 .../ozone/web/interfaces/package-info.java    |   22 -
 .../ozone/web/localstorage/package-info.java  |   18 -
 .../LengthInputStreamMessageBodyWriter.java   |   59 -
 .../web/messages/StringMessageBodyWriter.java |   62 -
 .../ozone/web/messages/package-info.java      |   18 -
 .../web/netty/CloseableCleanupListener.java   |   46 -
 .../web/netty/ObjectStoreChannelHandler.java  |   78 -
 .../web/netty/ObjectStoreJerseyContainer.java |  348 ----
 .../ObjectStoreJerseyContainerProvider.java   |   40 -
 .../web/netty/ObjectStoreRestHttpServer.java  |  215 ---
 .../web/netty/ObjectStoreURLDispatcher.java   |   61 -
 ...questContentObjectStoreChannelHandler.java |  117 --
 ...uestDispatchObjectStoreChannelHandler.java |  103 --
 .../hadoop/ozone/web/netty/package-info.java  |   26 -
 .../apache/hadoop/ozone/web/package-info.java |   22 -
 .../storage/DistributedStorageHandler.java    |  620 -------
 .../ozone/web/storage/package-info.java       |   27 -
 .../hadoop/ozone/web/userauth/Simple.java     |  169 --
 .../ozone/web/userauth/package-info.java      |   23 -
 ...sun.jersey.spi.container.ContainerProvider |   16 -
 .../hadoop/ozone/web/TestErrorCode.java       |   53 -
 .../apache/hadoop/ozone/web/package-info.java |   22 -
 .../ozone/web/ozShell/ObjectPrinter.java}     |   24 +-
 .../ozone/web/ozShell/OzoneAddress.java       |   21 +-
 .../ozShell/bucket/CreateBucketHandler.java   |   10 +-
 .../web/ozShell/bucket/InfoBucketHandler.java |    7 +-
 .../web/ozShell/bucket/ListBucketHandler.java |   20 +-
 .../web/ozShell/keys/InfoKeyHandler.java      |    6 +-
 .../web/ozShell/keys/ListKeyHandler.java      |   23 +-
 .../ozShell/volume/CreateVolumeHandler.java   |    6 +-
 .../web/ozShell/volume/InfoVolumeHandler.java |    6 +-
 .../web/ozShell/volume/ListVolumeHandler.java |   21 +-
 .../ozShell/volume/UpdateVolumeHandler.java   |   14 +-
 .../ozone/web/ozShell/TestObjectPrinter.java  |   50 +
 .../ozone/web/ozShell/TestOzoneAddress.java   |    7 +-
 hadoop-ozone/ozonefs/pom.xml                  |    5 -
 .../fs/ozone/TestOzoneFSInputStream.java      |   44 +-
 .../fs/ozone/TestOzoneFileInterfaces.java     |   77 +-
 .../hadoop/fs/ozone/TestOzoneFileSystem.java  |   76 +-
 .../hadoop/fs/ozone/TestOzoneFsRenameDir.java |   35 +-
 .../fs/ozone/contract/OzoneContract.java      |   42 +-
 hadoop-ozone/pom.xml                          |    6 -
 .../ozone/s3/TestOzoneClientProducer.java     |   25 +-
 .../apache/hadoop/ozone/om/TestOmSQLCli.java  |   90 +-
 150 files changed, 563 insertions(+), 18724 deletions(-)
 delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java
 delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneExceptionMapper.java
 delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
 delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java
 delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/exceptions/package-info.java
 delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java
 delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneException.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/BucketInfo.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfo.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfoDetails.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyLocation.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeInfo.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeOwner.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/package-info.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyArgs.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/ListArgs.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/UserArgs.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeArgs.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/package-info.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/OzoneQuota.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/package-info.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfoDetails.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyLocation.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeOwner.java
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/package-info.java
 delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java
 delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java
 delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestUtils.java
 delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestVolumeStructs.java
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneHelper.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/package-info.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/package-info.java
 delete mode 100644 hadoop-ozone/objectstore-service/pom.xml
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/package-info.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/OzoneRestUtils.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/package-info.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/ObjectStoreApplication.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/OzoneHddsDatanodeService.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/exceptions/package-info.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/ServiceFilter.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/StorageHandlerBuilder.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/UserHandlerBuilder.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeHandler.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/package-info.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Accounting.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Bucket.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/UserAuth.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Volume.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/package-info.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/localstorage/package-info.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/messages/LengthInputStreamMessageBodyWriter.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/messages/StringMessageBodyWriter.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/messages/package-info.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/CloseableCleanupListener.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreChannelHandler.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreJerseyContainer.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreJerseyContainerProvider.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreRestHttpServer.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreURLDispatcher.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/RequestContentObjectStoreChannelHandler.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/RequestDispatchObjectStoreChannelHandler.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/package-info.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/package-info.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/package-info.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/userauth/Simple.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/userauth/package-info.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider
 delete mode 100644 hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java
 delete mode 100644 hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/package-info.java
 rename hadoop-ozone/{common/src/test/java/org/apache/hadoop/ozone/web/package-info.java => ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/ObjectPrinter.java} (59%)
 create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestObjectPrinter.java

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index f8b2973cf62..879e8c2bdd9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -125,9 +125,6 @@ public final class OzoneConfigKeys {
    * */
   public static final String OZONE_ADMINISTRATORS_WILDCARD = "*";
 
-  public static final String OZONE_CLIENT_PROTOCOL =
-      "ozone.client.protocol";
-
   public static final String OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE =
       "ozone.client.stream.buffer.flush.size";
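Editor's note: the hunk above deletes OZONE_CLIENT_PROTOCOL, the key that made the client protocol pluggable. For context, the sketch below reconstructs, approximately and for illustration only, the reflective lookup the old OzoneClientFactory performed with this key; the class name RemovedProtocolLookup is hypothetical, but the getClass/getConstructor pattern mirrors the factory code deleted later in this patch.

    import java.lang.reflect.Constructor;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
    import org.apache.hadoop.ozone.client.rpc.RpcClient;

    public final class RemovedProtocolLookup {

      private RemovedProtocolLookup() {
      }

      public static ClientProtocol load(Configuration config) throws Exception {
        // Pre-HDDS-738: resolve the implementation class from the (now
        // deleted) ozone.client.protocol key, defaulting to RpcClient.
        @SuppressWarnings("unchecked")
        Class<? extends ClientProtocol> clazz = (Class<? extends ClientProtocol>)
            config.getClass("ozone.client.protocol", RpcClient.class);
        // Every implementation was expected to expose a constructor taking a
        // Configuration, which the factory invoked reflectively.
        Constructor<? extends ClientProtocol> ctor =
            clazz.getConstructor(Configuration.class);
        return ctor.newInstance(config);
      }

      public static void main(String[] args) throws Exception {
        ClientProtocol protocol = load(new OzoneConfiguration());
        System.out.println("Loaded protocol: " + protocol.getClass().getName());
      }
    }

After this patch the indirection is gone: RpcClient is the only implementation, so the factory constructs it directly, as the OzoneClientFactory hunks below show.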
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index f7334b166b8..9987415a1c5 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -461,19 +461,6 @@
       there is no wait.
     </description>
   </property>
-  <property>
-    <name>ozone.client.protocol</name>
-    <value>org.apache.hadoop.ozone.client.rpc.RpcClient</value>
-    <tag>OZONE, CLIENT, MANAGEMENT</tag>
-    <description>Protocol class to be used by the client to connect to ozone
-      cluster.
-      The build-in implementation includes:
-      org.apache.hadoop.ozone.client.rpc.RpcClient for RPC
-      org.apache.hadoop.ozone.client.rest.RestClient for REST
-      The default is the RpClient. Please do not change this unless you have a
-      very good understanding of what you are doing.
-    </description>
-  </property>
   <property>
     <name>ozone.client.socket.timeout</name>
     <value>5000ms</value>
@@ -1261,7 +1248,7 @@
   <property>
     <name>hdds.datanode.plugins</name>
-    <value>org.apache.hadoop.ozone.web.OzoneHddsDatanodeService</value>
+    <value></value>
     <description>
       Comma-separated list of HDDS datanode plug-ins to be activated when
       HDDS service starts as part of datanode.
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index fbb50e212e3..4fd723148ea 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.client;
 
+import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
@@ -211,6 +212,7 @@ public String getName() {
    *
    * @return acls
    */
+  @JsonIgnore
   public List<OzoneAcl> getAcls() throws IOException {
     return proxy.getAcl(ozoneObj);
   }
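Editor's note: with ozone.client.protocol deleted from ozone-default.xml, every factory method now yields an RPC-backed client. A minimal usage sketch follows; it assumes an Ozone Manager is reachable at the address configured under ozone.om.address, and the class name RpcOnlyClientExample is illustrative. The APIs used (getRpcClient, getObjectStore) are retained by this patch.

    import java.io.IOException;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.client.OzoneClient;
    import org.apache.hadoop.ozone.client.OzoneClientFactory;

    public final class RpcOnlyClientExample {

      private RpcOnlyClientExample() {
      }

      public static void main(String[] args) throws IOException {
        OzoneConfiguration conf = new OzoneConfiguration();
        // getClient(conf) and getRpcClient(conf) are now equivalent; the
        // getRestClient(...) overloads deleted below no longer exist.
        try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
          ObjectStore store = client.getObjectStore();
          System.out.println("Connected to object store: " + store);
        }
      }
    }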
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java
index de3116a6aa8..2e9080a66f8 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java
@@ -17,38 +17,27 @@
  */
 package org.apache.hadoop.ozone.client;
 
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-
 /**
  * This exception is thrown by the Ozone Clients.
  */
-public class OzoneClientException extends OzoneException {
-  /**
-   * Constructor that allows the shortMessage.
-   *
-   * @param shortMessage Short Message
-   */
-  public OzoneClientException(String shortMessage) {
-    super(0, shortMessage, shortMessage);
+public class OzoneClientException extends Exception {
+  public OzoneClientException() {
   }
 
-  /**
-   * Constructor that allows a shortMessage and an exception.
-   *
-   * @param shortMessage short message
-   * @param ex exception
-   */
-  public OzoneClientException(String shortMessage, Exception ex) {
-    super(0, shortMessage, shortMessage, ex);
+  public OzoneClientException(String s) {
+    super(s);
   }
 
-  /**
-   * Constructor that allows the shortMessage and a longer message.
-   *
-   * @param shortMessage Short Message
-   * @param message long error message
-   */
-  public OzoneClientException(String shortMessage, String message) {
-    super(0, shortMessage, message);
+  public OzoneClientException(String s, Throwable throwable) {
+    super(s, throwable);
+  }
+
+  public OzoneClientException(Throwable throwable) {
+    super(throwable);
+  }
+
+  public OzoneClientException(String s, Throwable throwable, boolean b,
+      boolean b1) {
+    super(s, throwable, b, b1);
   }
 }
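Editor's note: the rewrite above turns OzoneClientException into a plain checked exception with the standard java.lang.Exception constructor set, dropping the REST-era short/long message pairs. A short usage sketch, with a hypothetical helper method:

    import org.apache.hadoop.ozone.client.OzoneClientException;

    public final class ClientExceptionExample {

      private ClientExceptionExample() {
      }

      // Hypothetical helper: fail fast on an empty Ozone URI, using the new
      // single-message constructor instead of the old (code, short, long) form.
      static void parseAddress(String uri) throws OzoneClientException {
        if (uri == null || uri.isEmpty()) {
          throw new OzoneClientException("Ozone URI must not be empty");
        }
      }

      public static void main(String[] args) {
        try {
          parseAddress("");
        } catch (OzoneClientException e) {
          // Callers now handle it like any other checked exception.
          System.err.println("Client error: " + e.getMessage());
        }
      }
    }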
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
index de0d166abda..713a6b2df3f 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
@@ -18,37 +18,22 @@
 package org.apache.hadoop.ozone.client;
 
-import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.lang.reflect.Proxy;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rest.RestClient;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
+import com.google.common.base.Preconditions;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Proxy;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_PROTOCOL;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY;
-
 /**
- * Factory class to create different types of OzoneClients.
- * Based on <code>ozone.client.protocol</code>, it decides which
- * protocol to use for the communication.
- * Default value is
- * org.apache.hadoop.ozone.client.rpc.RpcClient.
- * OzoneClientFactory constructs a proxy using
- * {@link OzoneClientInvocationHandler}
- * and creates OzoneClient instance with it.
- * {@link OzoneClientInvocationHandler} dispatches the call to
- * underlying {@link ClientProtocol} implementation.
+ * Factory class to create OzoneClients.
  */
 public final class OzoneClientFactory {
@@ -87,9 +72,7 @@ public static OzoneClient getClient() throws IOException {
   public static OzoneClient getClient(Configuration config)
       throws IOException {
     Preconditions.checkNotNull(config);
-    Class<? extends ClientProtocol> clazz = (Class<? extends ClientProtocol>)
-        config.getClass(OZONE_CLIENT_PROTOCOL, RpcClient.class);
-    return getClient(getClientProtocol(clazz, config), config);
+    return getClient(getClientProtocol(config), config);
   }
@@ -166,85 +149,7 @@ public static OzoneClient getRpcClient(String omHost, Integer omRpcPort,
   public static OzoneClient getRpcClient(Configuration config)
       throws IOException {
     Preconditions.checkNotNull(config);
-    return getClient(getClientProtocol(RpcClient.class, config),
-        config);
-  }
-
-  /**
-   * Returns an OzoneClient which will use REST protocol.
-   *
-   * @param omHost
-   *        hostname of OzoneManager to connect.
-   *
-   * @return OzoneClient
-   *
-   * @throws IOException
-   */
-  public static OzoneClient getRestClient(String omHost)
-      throws IOException {
-    Configuration config = new OzoneConfiguration();
-    int port = OmUtils.getOmRestPort(config);
-    return getRestClient(omHost, port, config);
-  }
-
-  /**
-   * Returns an OzoneClient which will use REST protocol.
-   *
-   * @param omHost
-   *        hostname of OzoneManager to connect.
-   *
-   * @param omHttpPort
-   *        HTTP port of OzoneManager.
-   *
-   * @return OzoneClient
-   *
-   * @throws IOException
-   */
-  public static OzoneClient getRestClient(String omHost, Integer omHttpPort)
-      throws IOException {
-    return getRestClient(omHost, omHttpPort, new OzoneConfiguration());
-  }
-
-  /**
-   * Returns an OzoneClient which will use REST protocol.
-   *
-   * @param omHost
-   *        hostname of OzoneManager to connect.
-   *
-   * @param omHttpPort
-   *        HTTP port of OzoneManager.
-   *
-   * @param config
-   *        Configuration to be used for OzoneClient creation
-   *
-   * @return OzoneClient
-   *
-   * @throws IOException
-   */
-  public static OzoneClient getRestClient(String omHost, Integer omHttpPort,
-      Configuration config)
-      throws IOException {
-    Preconditions.checkNotNull(omHost);
-    Preconditions.checkNotNull(omHttpPort);
-    Preconditions.checkNotNull(config);
-    config.set(OZONE_OM_HTTP_ADDRESS_KEY, omHost + ":" + omHttpPort);
-    return getRestClient(config);
-  }
-
-  /**
-   * Returns an OzoneClient which will use REST protocol.
-   *
-   * @param config
-   *        Configuration to be used for OzoneClient creation
-   *
-   * @return OzoneClient
-   *
-   * @throws IOException
-   */
-  public static OzoneClient getRestClient(Configuration config)
-      throws IOException {
-    Preconditions.checkNotNull(config);
-    return getClient(getClientProtocol(RestClient.class, config),
+    return getClient(getClientProtocol(config),
         config);
   }
@@ -270,8 +175,6 @@ private static OzoneClient getClient(ClientProtocol clientProtocol,
   /**
    * Returns an instance of Protocol class.
    *
-   * @param protocolClass
-   *          Class object of the ClientProtocol.
    *
    * @param config
    *          Configuration used to initialize ClientProtocol.
@@ -280,23 +183,15 @@ private static OzoneClient getClient(ClientProtocol clientProtocol,
    *
    * @throws IOException
    */
-  private static ClientProtocol getClientProtocol(
-      Class<? extends ClientProtocol> protocolClass, Configuration config)
+  private static ClientProtocol getClientProtocol(Configuration config)
       throws IOException {
     try {
-      LOG.debug("Using {} as client protocol.",
-          protocolClass.getCanonicalName());
-      Constructor<? extends ClientProtocol> ctor =
-          protocolClass.getConstructor(Configuration.class);
-      return ctor.newInstance(config);
+      return new RpcClient(config);
     } catch (Exception e) {
-      final String message = "Couldn't create protocol " + protocolClass;
+      final String message = "Couldn't create RpcClient protocol";
       LOG.error(message + " exception: ", e);
       if (e.getCause() instanceof IOException) {
         throw (IOException) e.getCause();
-      } else if (e instanceof InvocationTargetException) {
-        throw new IOException(message,
-            ((InvocationTargetException) e).getTargetException());
       } else {
         throw new IOException(message, e);
       }
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
index 3da45cfc902..8531bfbe71e 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
@@ -17,106 +17,15 @@
  */
 package org.apache.hadoop.ozone.client;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.hadoop.hdds.client.OzoneQuota;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
-import org.apache.hadoop.ozone.client.rest.response.KeyInfo;
-import org.apache.hadoop.ozone.client.rest.response.KeyInfoDetails;
-import org.apache.hadoop.ozone.client.rest.response.KeyLocation;
-import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;
-import org.apache.hadoop.ozone.client.rest.response.VolumeOwner;
 
 /** A utility class for OzoneClient. */
 public final class OzoneClientUtils {
 
   private OzoneClientUtils() {}
 
-  /**
-   * Returns a BucketInfo object constructed using fields of the input
-   * OzoneBucket object.
-   *
-   * @param bucket OzoneBucket instance from which BucketInfo object needs to
-   *               be created.
-   * @return BucketInfo instance
-   */
-  public static BucketInfo asBucketInfo(OzoneBucket bucket) throws IOException {
-    BucketInfo bucketInfo =
-        new BucketInfo(bucket.getVolumeName(), bucket.getName());
-    bucketInfo
-        .setCreatedOn(HddsClientUtils.formatDateTime(bucket.getCreationTime()));
-    bucketInfo.setStorageType(bucket.getStorageType());
-    bucketInfo.setVersioning(
-        OzoneConsts.Versioning.getVersioning(bucket.getVersioning()));
-    bucketInfo.setEncryptionKeyName(
-        bucket.getEncryptionKeyName()==null? "N/A" :
-            bucket.getEncryptionKeyName());
-    return bucketInfo;
-  }
-
-  /**
-   * Returns a VolumeInfo object constructed using fields of the input
-   * OzoneVolume object.
-   *
-   * @param volume OzoneVolume instance from which VolumeInfo object needs to
-   *               be created.
- * @return VolumeInfo instance - */ - public static VolumeInfo asVolumeInfo(OzoneVolume volume) { - VolumeInfo volumeInfo = new VolumeInfo(volume.getName(), - HddsClientUtils.formatDateTime(volume.getCreationTime()), - volume.getOwner()); - volumeInfo.setQuota(OzoneQuota.getOzoneQuota(volume.getQuota())); - volumeInfo.setOwner(new VolumeOwner(volume.getOwner())); - return volumeInfo; - } - - /** - * Returns a KeyInfo object constructed using fields of the input - * OzoneKey object. - * - * @param key OzoneKey instance from which KeyInfo object needs to - * be created. - * @return KeyInfo instance - */ - public static KeyInfo asKeyInfo(OzoneKey key) { - KeyInfo keyInfo = new KeyInfo(); - keyInfo.setKeyName(key.getName()); - keyInfo.setCreatedOn(HddsClientUtils.formatDateTime(key.getCreationTime())); - keyInfo.setModifiedOn( - HddsClientUtils.formatDateTime(key.getModificationTime())); - keyInfo.setSize(key.getDataSize()); - return keyInfo; - } - - /** - * Returns a KeyInfoDetails object constructed using fields of the input - * OzoneKeyDetails object. - * - * @param key OzoneKeyDetails instance from which KeyInfo object needs to - * be created. - * @return KeyInfoDetails instance - */ - public static KeyInfoDetails asKeyInfoDetails(OzoneKeyDetails key) { - KeyInfoDetails keyInfo = new KeyInfoDetails(); - keyInfo.setKeyName(key.getName()); - keyInfo.setCreatedOn(HddsClientUtils.formatDateTime(key.getCreationTime())); - keyInfo.setModifiedOn( - HddsClientUtils.formatDateTime(key.getModificationTime())); - keyInfo.setSize(key.getDataSize()); - List keyLocations = new ArrayList<>(); - key.getOzoneKeyLocations().forEach((a) -> keyLocations.add(new KeyLocation( - a.getContainerID(), a.getLocalID(), a.getLength(), a.getOffset()))); - keyInfo.setKeyLocation(keyLocations); - keyInfo.setFileEncryptionInfo(key.getFileEncryptionInfo()); - return keyInfo; - } public static RetryPolicy createRetryPolicy(int maxRetryCount, long retryInterval) { diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java deleted file mode 100644 index abdc2fbe19a..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest; - -import org.apache.hadoop.ozone.om.helpers.ServiceInfo; - -import java.util.List; -import java.util.Random; - -/** - * Default selector randomly picks one of the REST Server from the list. 
- */ -public class DefaultRestServerSelector implements RestServerSelector { - - @Override - public ServiceInfo getRestServer(List restServices) { - return restServices.get( - new Random().nextInt(restServices.size())); - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneExceptionMapper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneExceptionMapper.java deleted file mode 100644 index 6c479f7721a..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneExceptionMapper.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest; - - -import javax.ws.rs.core.Response; -import javax.ws.rs.ext.ExceptionMapper; - -import org.slf4j.MDC; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class the represents various errors returned by the - * Object Layer. - */ -public class OzoneExceptionMapper implements ExceptionMapper { - private static final Logger LOG = - LoggerFactory.getLogger(OzoneExceptionMapper.class); - - @Override - public Response toResponse(OzoneException exception) { - LOG.debug("Returning exception. ex: {}", exception.toJsonString()); - MDC.clear(); - return Response.status((int)exception.getHttpCode()) - .entity(exception.toJsonString()).build(); - } - -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java deleted file mode 100644 index e01c2c3eec5..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java +++ /dev/null @@ -1,1130 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.client.rest; - -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.*; -import org.apache.hadoop.hdds.client.OzoneQuota; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.ozone.client.VolumeArgs; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.client.rest.headers.Header; -import org.apache.hadoop.ozone.client.rest.response.BucketInfo; -import org.apache.hadoop.ozone.client.rest.response.KeyInfoDetails; -import org.apache.hadoop.ozone.client.rest.response.VolumeInfo; -import org.apache.hadoop.ozone.client.rpc.OzoneKMSUtil; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.ozone.om.helpers.ServiceInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.web.response.ListBuckets; -import org.apache.hadoop.ozone.web.response.ListKeys; -import org.apache.hadoop.ozone.web.response.ListVolumes; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.Time; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHeaders; -import org.apache.http.HttpResponse; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.client.utils.URIBuilder; -import org.apache.http.entity.InputStreamEntity; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClients; -import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; -import org.apache.http.util.EntityUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.io.PipedInputStream; -import java.io.PipedOutputStream; -import java.net.InetSocketAddress; -import java.net.URI; -import java.net.URISyntaxException; -import java.text.ParseException; -import java.util.ArrayList; -import java.util.HashMap; 
-import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.FutureTask; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import static java.net.HttpURLConnection.HTTP_CREATED; -import static java.net.HttpURLConnection.HTTP_OK; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT; - -/** - * Ozone Client REST protocol implementation. It uses REST protocol to - * connect to Ozone Handler that executes client calls. - */ -public class RestClient implements ClientProtocol { - - private static final String PATH_SEPARATOR = "/"; - private static final Logger LOG = LoggerFactory.getLogger(RestClient.class); - - private final Configuration conf; - private final URI ozoneRestUri; - private final CloseableHttpClient httpClient; - private final UserGroupInformation ugi; - // private final OzoneAcl.OzoneACLRights userRights; - - /** - * Creates RestClient instance with the given configuration. - * @param conf Configuration - * @throws IOException - */ - public RestClient(Configuration conf) - throws IOException { - try { - Preconditions.checkNotNull(conf); - this.conf = conf; - this.ugi = UserGroupInformation.getCurrentUser(); - long socketTimeout = conf.getTimeDuration( - OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT, - OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - long connectionTimeout = conf.getTimeDuration( - OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT, - OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - int maxConnection = conf.getInt( - OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_MAX, - OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT); - - int maxConnectionPerRoute = conf.getInt( - OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX, - OzoneConfigKeys - .OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT - ); - - /* - To make RestClient Thread safe, creating the HttpClient with - ThreadSafeClientConnManager. - */ - PoolingHttpClientConnectionManager connManager = - new PoolingHttpClientConnectionManager(); - connManager.setMaxTotal(maxConnection); - connManager.setDefaultMaxPerRoute(maxConnectionPerRoute); - - this.httpClient = HttpClients.custom() - .setConnectionManager(connManager) - .setDefaultRequestConfig( - RequestConfig.custom() - .setSocketTimeout(Math.toIntExact(socketTimeout)) - .setConnectTimeout(Math.toIntExact(connectionTimeout)) - .build()) - .build(); - -// this.userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS, -// OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT); - - // TODO: Add new configuration parameter to configure RestServerSelector. - RestServerSelector defaultSelector = new DefaultRestServerSelector(); - InetSocketAddress restServer = getOzoneRestServerAddress(defaultSelector); - URIBuilder uriBuilder = new URIBuilder() - .setScheme("http") - .setHost(restServer.getHostName()) - .setPort(restServer.getPort()); - this.ozoneRestUri = uriBuilder.build(); - - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - private InetSocketAddress getOzoneRestServerAddress( - RestServerSelector selector) throws IOException { - String httpAddress = conf.get(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY); - - if (httpAddress == null) { - throw new IllegalArgumentException( - OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY + " must be defined. 
See" + - " https://wiki.apache.org/hadoop/Ozone#Configuration for" + - " details on configuring Ozone."); - } - - HttpGet httpGet = new HttpGet("http://" + httpAddress + - OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT); - HttpEntity entity = executeHttpRequest(httpGet); - try { - String serviceListJson = EntityUtils.toString(entity); - - ObjectMapper objectMapper = new ObjectMapper(); - TypeReference> serviceInfoReference = - new TypeReference>() { - }; - List services = objectMapper.readValue( - serviceListJson, serviceInfoReference); - - List dataNodeInfos = services.stream().filter( - a -> a.getNodeType().equals(HddsProtos.NodeType.DATANODE)) - .collect(Collectors.toList()); - - ServiceInfo restServer = selector.getRestServer(dataNodeInfos); - - return NetUtils.createSocketAddr( - NetUtils.normalizeHostName(restServer.getHostname()) + ":" - + restServer.getPort(ServicePort.Type.HTTP)); - } finally { - EntityUtils.consume(entity); - } - } - - @Override - public void createVolume(String volumeName) throws IOException { - createVolume(volumeName, VolumeArgs.newBuilder().build()); - } - - @Override - public void createVolume(String volumeName, VolumeArgs volArgs) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName); - Preconditions.checkNotNull(volArgs); - URIBuilder builder = new URIBuilder(ozoneRestUri); - String owner = volArgs.getOwner() == null ? - ugi.getUserName() : volArgs.getOwner(); - //TODO: support for ACLs has to be done in OzoneHandler (rest server) - /** - List listOfAcls = new ArrayList<>(); - //User ACL - listOfAcls.add(new OzoneAcl(OzoneAcl.OzoneACLType.USER, - owner, userRights)); - //ACLs from VolumeArgs - if(volArgs.getAcls() != null) { - listOfAcls.addAll(volArgs.getAcls()); - } - */ - builder.setPath(PATH_SEPARATOR + volumeName); - - String quota = volArgs.getQuota(); - if(quota != null) { - builder.setParameter(Header.OZONE_QUOTA_QUERY_TAG, quota); - } - - HttpPost httpPost = new HttpPost(builder.build()); - addOzoneHeaders(httpPost); - //use admin from VolumeArgs, if it's present - if(volArgs.getAdmin() != null) { - httpPost.removeHeaders(HttpHeaders.AUTHORIZATION); - httpPost.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " + - volArgs.getAdmin()); - } - httpPost.addHeader(Header.OZONE_USER, owner); - LOG.info("Creating Volume: {}, with {} as owner and quota set to {}.", - volumeName, owner, quota == null ? 
"default" : quota); - EntityUtils.consume(executeHttpRequest(httpPost)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - - @Override - public void setVolumeOwner(String volumeName, String owner) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName); - Preconditions.checkNotNull(owner); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName); - HttpPut httpPut = new HttpPut(builder.build()); - addOzoneHeaders(httpPut); - httpPut.addHeader(Header.OZONE_USER, owner); - EntityUtils.consume(executeHttpRequest(httpPut)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void setVolumeQuota(String volumeName, OzoneQuota quota) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName); - Preconditions.checkNotNull(quota); - String quotaString = quota.toString(); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName); - builder.setParameter(Header.OZONE_QUOTA_QUERY_TAG, quotaString); - HttpPut httpPut = new HttpPut(builder.build()); - addOzoneHeaders(httpPut); - EntityUtils.consume(executeHttpRequest(httpPut)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public OzoneVolume getVolumeDetails(String volumeName) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName); - builder.setParameter(Header.OZONE_INFO_QUERY_TAG, - Header.OZONE_INFO_QUERY_VOLUME); - HttpGet httpGet = new HttpGet(builder.build()); - addOzoneHeaders(httpGet); - HttpEntity response = executeHttpRequest(httpGet); - VolumeInfo volInfo = - VolumeInfo.parse(EntityUtils.toString(response)); - //TODO: OzoneHandler in datanode has to be modified to send ACLs - OzoneVolume volume = new OzoneVolume(conf, - this, - volInfo.getVolumeName(), - volInfo.getCreatedBy(), - volInfo.getOwner().getName(), - volInfo.getQuota().sizeInBytes(), - HddsClientUtils.formatDateTime(volInfo.getCreatedOn()), - null); - EntityUtils.consume(response); - return volume; - } catch (URISyntaxException | ParseException e) { - throw new IOException(e); - } - } - - @Override - public boolean checkVolumeAccess(String volumeName, OzoneAcl acl) - throws IOException { - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public void deleteVolume(String volumeName) throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName); - HttpDelete httpDelete = new HttpDelete(builder.build()); - addOzoneHeaders(httpDelete); - EntityUtils.consume(executeHttpRequest(httpDelete)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public List listVolumes(String volumePrefix, String prevKey, - int maxListResult) - throws IOException { - return listVolumes(null, volumePrefix, prevKey, maxListResult); - } - - @Override - public List listVolumes(String user, String volumePrefix, - String prevKey, int maxListResult) - throws IOException { - try { - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR); - builder.addParameter(Header.OZONE_INFO_QUERY_TAG, - Header.OZONE_LIST_QUERY_SERVICE); - builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS, - String.valueOf(maxListResult)); - 
addQueryParamter(Header.OZONE_LIST_QUERY_PREFIX, volumePrefix, builder); - addQueryParamter(Header.OZONE_LIST_QUERY_PREVKEY, prevKey, builder); - HttpGet httpGet = new HttpGet(builder.build()); - if (!Strings.isNullOrEmpty(user)) { - httpGet.addHeader(Header.OZONE_USER, user); - } - addOzoneHeaders(httpGet); - HttpEntity response = executeHttpRequest(httpGet); - ListVolumes volumeList = - ListVolumes.parse(EntityUtils.toString(response)); - EntityUtils.consume(response); - return volumeList.getVolumes().stream().map(volInfo -> { - long creationTime = 0; - try { - creationTime = HddsClientUtils.formatDateTime(volInfo.getCreatedOn()); - } catch (ParseException e) { - LOG.warn("Parse exception in getting creation time for volume", e); - } - return new OzoneVolume(conf, this, volInfo.getVolumeName(), - volInfo.getCreatedBy(), volInfo.getOwner().getName(), - volInfo.getQuota().sizeInBytes(), creationTime, null); - }).collect(Collectors.toList()); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void createBucket(String volumeName, String bucketName) - throws IOException { - createBucket(volumeName, bucketName, BucketArgs.newBuilder().build()); - } - - @Override - public void createBucket( - String volumeName, String bucketName, BucketArgs bucketArgs) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(bucketArgs); - URIBuilder builder = new URIBuilder(ozoneRestUri); - OzoneConsts.Versioning versioning = OzoneConsts.Versioning.DISABLED; - if(bucketArgs.getVersioning() != null && - bucketArgs.getVersioning()) { - versioning = OzoneConsts.Versioning.ENABLED; - } - StorageType storageType = bucketArgs.getStorageType() == null ? - StorageType.DEFAULT : bucketArgs.getStorageType(); - - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName); - HttpPost httpPost = new HttpPost(builder.build()); - addOzoneHeaders(httpPost); - - //ACLs from BucketArgs - if(bucketArgs.getAcls() != null) { - for (OzoneAcl acl : bucketArgs.getAcls()) { - httpPost.addHeader( - Header.OZONE_ACLS, Header.OZONE_ACL_ADD + " " + acl.toString()); - } - } - httpPost.addHeader(Header.OZONE_STORAGE_TYPE, storageType.toString()); - httpPost.addHeader(Header.OZONE_BUCKET_VERSIONING, - versioning.toString()); - LOG.info("Creating Bucket: {}/{}, with Versioning {} and Storage Type" + - " set to {}", volumeName, bucketName, versioning, - storageType); - - EntityUtils.consume(executeHttpRequest(httpPost)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void setBucketVersioning( - String volumeName, String bucketName, Boolean versioning) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(versioning); - URIBuilder builder = new URIBuilder(ozoneRestUri); - - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName); - HttpPut httpPut = new HttpPut(builder.build()); - addOzoneHeaders(httpPut); - - httpPut.addHeader(Header.OZONE_BUCKET_VERSIONING, - getBucketVersioning(versioning).toString()); - EntityUtils.consume(executeHttpRequest(httpPut)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void setBucketStorageType( - String volumeName, String bucketName, StorageType storageType) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(storageType); - URIBuilder 
builder = new URIBuilder(ozoneRestUri); - - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName); - HttpPut httpPut = new HttpPut(builder.build()); - addOzoneHeaders(httpPut); - - httpPut.addHeader(Header.OZONE_STORAGE_TYPE, storageType.toString()); - EntityUtils.consume(executeHttpRequest(httpPut)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void deleteBucket(String volumeName, String bucketName) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName); - HttpDelete httpDelete = new HttpDelete(builder.build()); - addOzoneHeaders(httpDelete); - EntityUtils.consume(executeHttpRequest(httpDelete)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void checkBucketAccess(String volumeName, String bucketName) - throws IOException { - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public OzoneBucket getBucketDetails(String volumeName, String bucketName) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName); - builder.setParameter(Header.OZONE_INFO_QUERY_TAG, - Header.OZONE_INFO_QUERY_BUCKET); - HttpGet httpGet = new HttpGet(builder.build()); - addOzoneHeaders(httpGet); - HttpEntity response = executeHttpRequest(httpGet); - BucketInfo bucketInfo = - BucketInfo.parse(EntityUtils.toString(response)); - OzoneBucket bucket = new OzoneBucket(conf, - this, - bucketInfo.getVolumeName(), - bucketInfo.getBucketName(), - bucketInfo.getStorageType(), - getBucketVersioningFlag(bucketInfo.getVersioning()), - HddsClientUtils.formatDateTime(bucketInfo.getCreatedOn()), - new HashMap<>()); - EntityUtils.consume(response); - return bucket; - } catch (URISyntaxException | ParseException e) { - throw new IOException(e); - } - } - - @Override - public List listBuckets(String volumeName, String bucketPrefix, - String prevBucket, int maxListResult) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName); - builder.addParameter(Header.OZONE_INFO_QUERY_TAG, - Header.OZONE_INFO_QUERY_BUCKET); - builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS, - String.valueOf(maxListResult)); - addQueryParamter(Header.OZONE_LIST_QUERY_PREFIX, bucketPrefix, builder); - addQueryParamter(Header.OZONE_LIST_QUERY_PREVKEY, prevBucket, builder); - HttpGet httpGet = new HttpGet(builder.build()); - addOzoneHeaders(httpGet); - HttpEntity response = executeHttpRequest(httpGet); - ListBuckets bucketList = - ListBuckets.parse(EntityUtils.toString(response)); - EntityUtils.consume(response); - return bucketList.getBuckets().stream().map(bucketInfo -> { - long creationTime = 0; - try { - creationTime = - HddsClientUtils.formatDateTime(bucketInfo.getCreatedOn()); - } catch (ParseException e) { - LOG.warn("Parse exception in getting creation time for volume", e); - } - return new OzoneBucket(conf, this, volumeName, - bucketInfo.getBucketName(), bucketInfo.getStorageType(), - getBucketVersioningFlag(bucketInfo.getVersioning()), creationTime, - new HashMap<>(), bucketInfo.getEncryptionKeyName()); - }).collect(Collectors.toList()); - } catch 
(URISyntaxException e) { - throw new IOException(e); - } - } - - /** - * Writes a key in an existing bucket. - * - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param keyName Name of the Key - * @param size Size of the data - * @param type - * @param factor @return {@link OzoneOutputStream} - */ - @Override - public OzoneOutputStream createKey( - String volumeName, String bucketName, String keyName, long size, - ReplicationType type, ReplicationFactor factor, - Map metadata) - throws IOException { - // TODO: Once ReplicationType and ReplicationFactor are supported in - // OzoneHandler (in Datanode), set them in header. - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - HddsClientUtils.checkNotNull(keyName, type, factor); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName + - PATH_SEPARATOR + keyName); - HttpPut putRequest = new HttpPut(builder.build()); - addOzoneHeaders(putRequest); - PipedInputStream in = new PipedInputStream(); - OutputStream out = new PipedOutputStream(in); - putRequest.setEntity(new InputStreamEntity(in, size)); - FutureTask futureTask = - new FutureTask<>(() -> executeHttpRequest(putRequest)); - new Thread(futureTask).start(); - OzoneOutputStream outputStream = new OzoneOutputStream( - new OutputStream() { - @Override - public void write(int b) throws IOException { - out.write(b); - } - - @Override - public void close() throws IOException { - try { - out.close(); - EntityUtils.consume(futureTask.get()); - } catch (ExecutionException | InterruptedException e) { - throw new IOException(e); - } - } - }); - - return outputStream; - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - /** - * Get a valid Delegation Token. Not supported for RestClient. - * - * @param renewer the designated renewer for the token - * @return Token - * @throws IOException - */ - @Override - public Token getDelegationToken(Text renewer) - throws IOException { - throw new IOException("Method not supported"); - } - - /** - * Renew an existing delegation token. Not supported for RestClient. - * - * @param token delegation token obtained earlier - * @return the new expiration time - * @throws IOException - */ - @Override - public long renewDelegationToken(Token token) - throws IOException { - throw new IOException("Method not supported"); - } - - /** - * Cancel an existing delegation token. Not supported for RestClient. 
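Note for readers of the removed createKey() above: it bridges the caller-facing OzoneOutputStream to an HTTP PUT by pairing piped streams with a FutureTask that runs the request on a background thread. A minimal, self-contained sketch of that pattern, with a byte sink standing in for the HTTP entity (all names and sizes here are invented for illustration):

import java.io.ByteArrayOutputStream;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.util.concurrent.FutureTask;

public final class PipedUploadSketch {
  public static void main(String[] args) throws Exception {
    PipedInputStream in = new PipedInputStream();
    PipedOutputStream out = new PipedOutputStream(in);
    // Background task drains the pipe, standing in for the HTTP PUT entity.
    FutureTask<byte[]> upload = new FutureTask<>(() -> {
      ByteArrayOutputStream sink = new ByteArrayOutputStream();
      byte[] buf = new byte[4096];
      int n;
      while ((n = in.read(buf)) != -1) {
        sink.write(buf, 0, n);
      }
      return sink.toByteArray();
    });
    new Thread(upload).start();
    out.write("hello ozone".getBytes("UTF-8"));
    out.close();                  // end-of-stream unblocks the reader
    System.out.println(upload.get().length + " bytes streamed");
  }
}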
- * - * @param token delegation token - * @throws IOException - */ - @Override - public void cancelDelegationToken(Token token) - throws IOException { - throw new IOException("Method not supported"); - } - - @Override - public S3SecretValue getS3Secret(String kerberosID) throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not " + - "support this operation."); - } - - @Override - public OMFailoverProxyProvider getOMProxyProvider() { - return null; - } - - @Override - public KeyProvider getKeyProvider() throws IOException { - // TODO: fix me to support kms instances for difference OMs - return OzoneKMSUtil.getKeyProvider(conf, getKeyProviderUri()); - } - - @Override - public URI getKeyProviderUri() throws IOException { - return OzoneKMSUtil.getKeyProviderUri(ugi, null, null, conf); - } - - @Override - public OzoneInputStream getKey( - String volumeName, String bucketName, String keyName) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(keyName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName + - PATH_SEPARATOR + keyName); - HttpGet getRequest = new HttpGet(builder.build()); - addOzoneHeaders(getRequest); - HttpEntity entity = executeHttpRequest(getRequest); - PipedInputStream in = new PipedInputStream(); - OutputStream out = new PipedOutputStream(in); - FutureTask futureTask = - new FutureTask<>(() -> { - entity.writeTo(out); - out.close(); - return null; - }); - new Thread(futureTask).start(); - OzoneInputStream inputStream = new OzoneInputStream( - new InputStream() { - - @Override - public int read() throws IOException { - return in.read(); - } - - @Override - public void close() throws IOException { - in.close(); - EntityUtils.consume(entity); - } - }); - - return inputStream; - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void deleteKey(String volumeName, String bucketName, String keyName) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(keyName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName + PATH_SEPARATOR + keyName); - HttpDelete httpDelete = new HttpDelete(builder.build()); - addOzoneHeaders(httpDelete); - EntityUtils.consume(executeHttpRequest(httpDelete)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public void renameKey(String volumeName, String bucketName, - String fromKeyName, String toKeyName) throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - HddsClientUtils.checkNotNull(fromKeyName, toKeyName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName + PATH_SEPARATOR + bucketName - + PATH_SEPARATOR + fromKeyName); - builder.addParameter(Header.OZONE_RENAME_TO_KEY_PARAM_NAME, toKeyName); - HttpPost httpPost = new HttpPost(builder.build()); - addOzoneHeaders(httpPost); - EntityUtils.consume(executeHttpRequest(httpPost)); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public List listKeys(String volumeName, String bucketName, - String keyPrefix, String prevKey, - int maxListResult) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - 
builder - .setPath(PATH_SEPARATOR + volumeName + PATH_SEPARATOR + bucketName); - builder.addParameter(Header.OZONE_INFO_QUERY_TAG, - Header.OZONE_INFO_QUERY_KEY); - builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS, - String.valueOf(maxListResult)); - addQueryParamter(Header.OZONE_LIST_QUERY_PREFIX, keyPrefix, builder); - addQueryParamter(Header.OZONE_LIST_QUERY_PREVKEY, prevKey, builder); - HttpGet httpGet = new HttpGet(builder.build()); - addOzoneHeaders(httpGet); - HttpEntity response = executeHttpRequest(httpGet); - ListKeys keyList = ListKeys.parse(EntityUtils.toString(response)); - EntityUtils.consume(response); - return keyList.getKeyList().stream().map(keyInfo -> { - long creationTime = 0, modificationTime = 0; - try { - creationTime = HddsClientUtils.formatDateTime(keyInfo.getCreatedOn()); - modificationTime = - HddsClientUtils.formatDateTime(keyInfo.getModifiedOn()); - } catch (ParseException e) { - LOG.warn("Parse exception in getting creation time for volume", e); - } - return new OzoneKey(volumeName, bucketName, keyInfo.getKeyName(), - keyInfo.getSize(), creationTime, modificationTime, - ReplicationType.valueOf(keyInfo.getType().toString())); - }).collect(Collectors.toList()); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public OzoneKeyDetails getKeyDetails( - String volumeName, String bucketName, String keyName) - throws IOException { - try { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(keyName); - URIBuilder builder = new URIBuilder(ozoneRestUri); - builder.setPath(PATH_SEPARATOR + volumeName + - PATH_SEPARATOR + bucketName + PATH_SEPARATOR + keyName); - builder.setParameter(Header.OZONE_INFO_QUERY_TAG, - Header.OZONE_INFO_QUERY_KEY_DETAIL); - HttpGet httpGet = new HttpGet(builder.build()); - addOzoneHeaders(httpGet); - HttpEntity response = executeHttpRequest(httpGet); - KeyInfoDetails keyInfo = - KeyInfoDetails.parse(EntityUtils.toString(response)); - - List ozoneKeyLocations = new ArrayList<>(); - keyInfo.getKeyLocations().forEach((a) -> ozoneKeyLocations.add( - new OzoneKeyLocation(a.getContainerID(), a.getLocalID(), - a.getLength(), a.getOffset()))); - OzoneKeyDetails key = new OzoneKeyDetails(volumeName, - bucketName, - keyInfo.getKeyName(), - keyInfo.getSize(), - HddsClientUtils.formatDateTime(keyInfo.getCreatedOn()), - HddsClientUtils.formatDateTime(keyInfo.getModifiedOn()), - ozoneKeyLocations, ReplicationType.valueOf( - keyInfo.getType().toString()), - new HashMap<>(), keyInfo.getFileEncryptionInfo()); - EntityUtils.consume(response); - return key; - } catch (URISyntaxException | ParseException e) { - throw new IOException(e); - } - } - - @Override - public void createS3Bucket(String userName, String s3BucketName) - throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not " + - "support this operation."); - } - - @Override - public void deleteS3Bucket(String s3BucketName) - throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not " + - "support this operation."); - } - - @Override - public String getOzoneBucketMapping(String s3BucketName) throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not " + - "support this operation."); - } - - @Override - public String getOzoneVolumeName(String s3BucketName) throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not " + - "support this operation."); - } - - @Override - public String 
getOzoneBucketName(String s3BucketName) throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not " + - "support this operation."); - } - - @Override - public List<OzoneBucket> listS3Buckets(String userName, String bucketPrefix, - String prevBucket, int maxListResult) - throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not " + - "support this operation."); - } - - /** - * Adds Ozone headers to the http request. - * - * @param httpRequest Http Request - */ - private void addOzoneHeaders(HttpUriRequest httpRequest) { - httpRequest.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " + - ugi.getUserName()); - httpRequest.addHeader(HttpHeaders.DATE, - HddsClientUtils.formatDateTime(Time.monotonicNow())); - httpRequest.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - } - - /** - * Sends the http request to the server and returns the response HttpEntity. - * It is the responsibility of the caller to consume and close the response - * HttpEntity by calling {@code EntityUtils.consume}. - * - * @param httpUriRequest http request - * @throws IOException - */ - private HttpEntity executeHttpRequest(HttpUriRequest httpUriRequest) - throws IOException { - HttpResponse response = httpClient.execute(httpUriRequest); - int errorCode = response.getStatusLine().getStatusCode(); - HttpEntity entity = response.getEntity(); - if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) { - return entity; - } - if (entity != null) { - throw new IOException( - OzoneException.parse(EntityUtils.toString(entity))); - } else { - throw new IOException("Unexpected null in http payload," + - " while processing request"); - } - } - - /** - * Converts OzoneConsts.Versioning to boolean. - * - * @param version versioning enum value - * @return corresponding boolean value - */ - private Boolean getBucketVersioningFlag( - OzoneConsts.Versioning version) { - if(version != null) { - switch(version) { - case ENABLED: - return true; - case NOT_DEFINED: - case DISABLED: - default: - return false; - } - } - return false; - } -
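The consume-or-leak contract that executeHttpRequest() documents above is easy to violate; a hedged caller-side sketch of the expected discipline, against an assumed local endpoint (URL and client setup are illustrative, not taken from this patch):

import java.io.IOException;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;

public final class ConsumeEntitySketch {
  public static void main(String[] args) throws IOException {
    try (CloseableHttpClient client = HttpClients.createDefault()) {
      HttpEntity entity =
          client.execute(new HttpGet("http://localhost:9880/")).getEntity();
      try {
        System.out.println(EntityUtils.toString(entity));
      } finally {
        EntityUtils.consume(entity); // hand the connection back to the pool
      }
    }
  }
}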
- /** - * Converts a bucket versioning flag into OzoneConsts.Versioning. - * - * @param flag versioning flag - * @return corresponding OzoneConsts.Versioning - */ - private OzoneConsts.Versioning getBucketVersioning(Boolean flag) { - if(flag != null) { - if(flag) { - return OzoneConsts.Versioning.ENABLED; - } else { - return OzoneConsts.Versioning.DISABLED; - } - } - return OzoneConsts.Versioning.NOT_DEFINED; - } - - @Override - public void close() throws IOException { - httpClient.close(); - } - - private void addQueryParamter(String param, String value, - URIBuilder builder) { - if (!Strings.isNullOrEmpty(value)) { - builder.addParameter(param, value); - } - } - - @Override - public OmMultipartInfo initiateMultipartUpload(String volumeName, - String bucketName, - String keyName, - ReplicationType type, - ReplicationFactor factor) - throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not " + - "support this operation."); - } - - @Override - public OzoneOutputStream createMultipartKey(String volumeName, - String bucketName, - String keyName, - long size, - int partNumber, - String uploadID) - throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not " + - "support this operation."); - } - - @Override - public OmMultipartUploadCompleteInfo completeMultipartUpload( - String volumeName, String bucketName, String keyName, String uploadID, - Map<Integer, String> partsMap) throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not " + - "support this operation."); - } - - @Override - public void abortMultipartUpload(String volumeName, - String bucketName, String keyName, String uploadID) throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not " + - "support this operation."); - } - - @Override - public OzoneMultipartUploadPartListParts listParts(String volumeName, - String bucketName, String keyName, String uploadID, int partNumberMarker, - int maxParts) throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not " + - "support this operation."); - } -
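For reference, the prev-key paging contract exposed by the removed listKeys() further above can be driven as below; a sketch only, assuming an already-built ClientProtocol instance and illustrative volume, bucket, and page-size values:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;

public final class ListKeysPagingSketch {
  static void printAllKeys(ClientProtocol client) throws IOException {
    final int pageSize = 1000;
    String prevKey = null;
    List<OzoneKey> page;
    do {
      // Each call resumes after the last key name seen in the prior page.
      page = client.listKeys("vol1", "bucket1", null, prevKey, pageSize);
      for (OzoneKey key : page) {
        System.out.println(key.getName());
      }
      if (!page.isEmpty()) {
        prevKey = page.get(page.size() - 1).getName();
      }
    } while (page.size() == pageSize); // a short page means we are done
  }
}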
- /** - * Get CanonicalServiceName for the ozone delegation token. - * @return Canonical Service Name of the ozone delegation token. - */ - public String getCanonicalServiceName(){ - throw new UnsupportedOperationException("Ozone REST protocol does not " + - "support this operation."); - } - - @Override - public OzoneFileStatus getOzoneFileStatus(String volumeName, - String bucketName, String keyName) throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not " + - "support this operation."); - } - - @Override - public void createDirectory(String volumeName, String bucketName, - String keyName) { - throw new UnsupportedOperationException( - "Ozone REST protocol does not " + "support this operation."); - } - - @Override - public OzoneInputStream readFile(String volumeName, String bucketName, - String keyName) { - throw new UnsupportedOperationException( - "Ozone REST protocol does not " + "support this operation."); - } - - @Override - public OzoneOutputStream createFile(String volumeName, String bucketName, - String keyName, long size, ReplicationType type, ReplicationFactor factor, - boolean overWrite, boolean recursive) { - throw new UnsupportedOperationException( - "Ozone REST protocol does not " + "support this operation."); - } - - @Override - public List<OzoneFileStatus> listStatus(String volumeName, String bucketName, - String keyName, boolean recursive, String startKey, long numEntries) - throws IOException { - throw new UnsupportedOperationException( - "Ozone REST protocol does not " + "support this operation."); - } - - /** - * Adds an ACL for the given Ozone object. Returns true if the ACL is added - * successfully, false otherwise. - * - * @param obj Ozone object for which the ACL should be added. - * @param acl ozone ACL to be added. - * @throws IOException if there is an error. - */ - @Override - public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not" + - " support this operation."); - } - - /** - * Removes an ACL from the given Ozone object. Returns true if the ACL is - * removed successfully, false otherwise. - * - * @param obj Ozone object. - * @param acl Ozone acl to be removed. - * @throws IOException if there is an error. - */ - @Override - public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not" + - " support this operation."); - } - - /** - * Sets the ACLs for the given Ozone object. This operation resets the ACL of - * the given object to the list of ACLs provided in the argument. - * - * @param obj Ozone object. - * @param acls List of acls. - * @throws IOException if there is an error. - */ - @Override - public boolean setAcl(OzoneObj obj, List<OzoneAcl> acls) throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not" + - " support this operation."); - } - - /** - * Returns the list of ACLs for the given Ozone object. - * - * @param obj Ozone object. - * @throws IOException if there is an error. - */ - @Override - public List<OzoneAcl> getAcl(OzoneObj obj) throws IOException { - throw new UnsupportedOperationException("Ozone REST protocol does not" + - " support this operation."); - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java deleted file mode 100644 index fbd6eb8ea9a..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest; - -import org.apache.hadoop.ozone.om.helpers.ServiceInfo; - -import java.util.List; - -/** - * The implementor of this interface should select the REST server which will - * be used by the client to connect to Ozone Cluster, given list of - * REST Servers/DataNodes (DataNodes are the ones which hosts REST Service). - */ -public interface RestServerSelector { - - /** - * Returns the REST Service which will be used by the client for connection. - * - * @param restServices list of available REST servers - * @return ServiceInfo - */ - ServiceInfo getRestServer(List restServices); - -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/exceptions/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/exceptions/package-info.java deleted file mode 100644 index 233e7882e2d..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/exceptions/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client.rest.exceptions; - -/** - * This package contains ozone rest client libraries. - */ diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java deleted file mode 100644 index 340709f492d..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
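One conceivable implementation of the RestServerSelector contract above (illustrative only; the policy of the removed DefaultRestServerSelector is not shown in this hunk) picks a random REST-capable datanode so load spreads across the cluster:

package org.apache.hadoop.ozone.client.rest;

import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.ozone.om.helpers.ServiceInfo;

public class RandomRestServerSelector implements RestServerSelector {
  @Override
  public ServiceInfo getRestServer(List<ServiceInfo> restServices) {
    // Any datanode hosting the REST service is an acceptable endpoint.
    return restServices.get(
        ThreadLocalRandom.current().nextInt(restServices.size()));
  }
}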
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -@InterfaceAudience.Private -package org.apache.hadoop.ozone.client.rest.headers; - -import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java deleted file mode 100644 index ebcc104811f..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest; - -/** - * This package contains Ozone rest client library classes. - */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneException.java deleted file mode 100644 index 953e3991f7b..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneException.java +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest; - - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; - -/** - * Class the represents various errors returned by the - * Ozone Layer. 
- */ -@InterfaceAudience.Private -public class OzoneException extends Exception { - - private static final ObjectReader READER = - new ObjectMapper().readerFor(OzoneException.class); - private static final ObjectMapper MAPPER; - - static { - MAPPER = new ObjectMapper(); - MAPPER.setVisibility( - MAPPER.getSerializationConfig().getDefaultVisibilityChecker() - .withCreatorVisibility(JsonAutoDetect.Visibility.NONE) - .withFieldVisibility(JsonAutoDetect.Visibility.NONE) - .withGetterVisibility(JsonAutoDetect.Visibility.NONE) - .withIsGetterVisibility(JsonAutoDetect.Visibility.NONE) - .withSetterVisibility(JsonAutoDetect.Visibility.NONE)); - } - - @JsonProperty("httpCode") - private long httpCode; - @JsonProperty("shortMessage") - private String shortMessage; - @JsonProperty("resource") - private String resource; - @JsonProperty("message") - private String message; - @JsonProperty("requestID") - private String requestId; - @JsonProperty("hostName") - private String hostID; - - /** - * Constructs a new exception with {@code null} as its detail message. The - * cause is not initialized, and may subsequently be initialized by a call - * to {@link #initCause}. - * - * This constructor is needed by Json Serializer. - */ - public OzoneException() { - } - - - /** - * Constructor that allows a shortMessage and exception. - * - * @param httpCode Error Code - * @param shortMessage Short Message - * @param ex Exception - */ - public OzoneException(long httpCode, String shortMessage, Exception ex) { - super(ex); - this.message = ex.getMessage(); - this.shortMessage = shortMessage; - this.httpCode = httpCode; - } - - - /** - * Constructor that allows a shortMessage. - * - * @param httpCode Error Code - * @param shortMessage Short Message - */ - public OzoneException(long httpCode, String shortMessage) { - this.shortMessage = shortMessage; - this.httpCode = httpCode; - } - - /** - * Constructor that allows a shortMessage and long message. - * - * @param httpCode Error Code - * @param shortMessage Short Message - * @param message long error message - */ - public OzoneException(long httpCode, String shortMessage, String message) { - this.shortMessage = shortMessage; - this.message = message; - this.httpCode = httpCode; - } - - /** - * Constructor that allows a shortMessage, a long message and an exception. - * - * @param httpCode Error code - * @param shortMessage Short message - * @param message Long error message - * @param ex Exception - */ - public OzoneException(long httpCode, String shortMessage, - String message, Exception ex) { - super(ex); - this.shortMessage = shortMessage; - this.message = message; - this.httpCode = httpCode; - } - - /** - * Returns the Resource that was involved in the stackTraceString. - * - * @return String - */ - public String getResource() { - return resource; - } - - /** - * Sets Resource. - * - * @param resourceName - Name of the Resource - */ - public void setResource(String resourceName) { - this.resource = resourceName; - } - - /** - * Gets a detailed message for the error. - * - * @return String - */ - public String getMessage() { - return message; - } - - /** - * Sets the error message. - * - * @param longMessage - Long message - */ - public void setMessage(String longMessage) { - this.message = longMessage; - } - - /** - * Returns request Id. - * - * @return String - */ - public String getRequestId() { - return requestId; - } - - /** - * Sets request ID. 
- * - * @param ozoneRequestId Request ID generated by the Server - */ - public void setRequestId(String ozoneRequestId) { - this.requestId = ozoneRequestId; - } - - /** - * Returns short error string. - * - * @return String - */ - public String getShortMessage() { - return shortMessage; - } - - /** - * Sets short error string. - * - * @param shortError Short Error Code - */ - public void setShortMessage(String shortError) { - this.shortMessage = shortError; - } - - /** - * Returns hostID. - * - * @return String - */ - public String getHostID() { - return hostID; - } - - /** - * Sets host ID. - * - * @param hostName host Name - */ - public void setHostID(String hostName) { - this.hostID = hostName; - } - - /** - * Returns http error code. - * - * @return long - */ - public long getHttpCode() { - return httpCode; - } - - /** - * Sets http status. - * - * @param httpStatus http error code. - */ - public void setHttpCode(long httpStatus) { - this.httpCode = httpStatus; - } - - /** - * Returns a Json String. - * - * @return JSON representation of the Error - */ - public String toJsonString() { - try { - return MAPPER.writeValueAsString(this); - } catch (IOException ex) { - // TODO : Log this error on server side. - } - // TODO : Replace this with a JSON Object -- That represents this error. - return "500 Internal Server Error"; - } - - /** - * Parses an Exception record. - * - * @param jsonString - Exception in Json format. - * - * @return OzoneException Object - * - * @throws IOException - */ - public static OzoneException parse(String jsonString) throws IOException { - return READER.readValue(jsonString); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java deleted file mode 100644 index 3e404937061..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest.headers; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * OZONE specific HTTP headers. 
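A hedged round-trip through the OzoneException JSON helpers above; the status code and messages are invented sample data:

import org.apache.hadoop.ozone.client.rest.OzoneException;

public final class OzoneExceptionJsonSketch {
  public static void main(String[] args) throws Exception {
    OzoneException ex =
        new OzoneException(404, "NotFound", "volume does not exist");
    String json = ex.toJsonString();                  // serialize with MAPPER
    OzoneException back = OzoneException.parse(json); // rebuild with READER
    System.out.println(back.getHttpCode() + " " + back.getShortMessage());
  }
}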
- */ -@InterfaceAudience.Private -public final class Header { - public static final String OZONE_QUOTA_BYTES = "BYTES"; - public static final String OZONE_QUOTA_MB = "MB"; - public static final String OZONE_QUOTA_GB = "GB"; - public static final String OZONE_QUOTA_TB = "TB"; - public static final String OZONE_QUOTA_REMOVE = "remove"; - public static final String OZONE_QUOTA_UNDEFINED = "undefined"; - public static final String OZONE_EMPTY_STRING=""; - public static final String OZONE_DEFAULT_LIST_SIZE = "1000"; - - public static final String OZONE_USER = "x-ozone-user"; - public static final String OZONE_SIMPLE_AUTHENTICATION_SCHEME = "OZONE"; - public static final String OZONE_VERSION_HEADER = "x-ozone-version"; - public static final String OZONE_V1_VERSION_HEADER ="v1"; - - public static final String OZONE_LIST_QUERY_SERVICE = "service"; - - public static final String OZONE_INFO_QUERY_VOLUME = "volume"; - public static final String OZONE_INFO_QUERY_BUCKET = "bucket"; - public static final String OZONE_INFO_QUERY_KEY = "key"; - public static final String OZONE_INFO_QUERY_KEY_DETAIL = "key-detail"; - - public static final String OZONE_REQUEST_ID = "x-ozone-request-id"; - public static final String OZONE_SERVER_NAME = "x-ozone-server-name"; - - public static final String OZONE_STORAGE_TYPE = "x-ozone-storage-type"; - - public static final String OZONE_BUCKET_VERSIONING = - "x-ozone-bucket-versioning"; - - public static final String OZONE_ACLS = "x-ozone-acls"; - public static final String OZONE_ACL_ADD = "ADD"; - public static final String OZONE_ACL_REMOVE = "REMOVE"; - - public static final String OZONE_INFO_QUERY_TAG ="info"; - public static final String OZONE_QUOTA_QUERY_TAG ="quota"; - public static final String CONTENT_MD5 = "Content-MD5"; - public static final String OZONE_LIST_QUERY_PREFIX="prefix"; - public static final String OZONE_LIST_QUERY_MAXKEYS="max-keys"; - public static final String OZONE_LIST_QUERY_PREVKEY="prev-key"; - public static final String OZONE_LIST_QUERY_ROOTSCAN="root-scan"; - - public static final String OZONE_RENAME_TO_KEY_PARAM_NAME = "toKey"; - - private Header() { - // Never constructed. - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java deleted file mode 100644 index 76bc206e53d..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client.rest.headers; - -/** - * Ozone HTTP Header utility. 
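Putting the Header constants above on the wire: a sketch of the POST that the removed createBucket() path produced. The host and port are assumptions (9880 was the default datanode REST port), and the ACL value format follows OzoneAcl#toString:

import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.utils.URIBuilder;
import org.apache.hadoop.ozone.client.rest.headers.Header;

public final class CreateBucketRequestSketch {
  public static void main(String[] args) throws Exception {
    HttpPost post = new HttpPost(new URIBuilder("http://localhost:9880")
        .setPath("/vol1/bucket1").build());
    post.addHeader(Header.OZONE_VERSION_HEADER, Header.OZONE_V1_VERSION_HEADER);
    post.addHeader(Header.OZONE_STORAGE_TYPE, "DISK");
    post.addHeader(Header.OZONE_BUCKET_VERSIONING, "ENABLED");
    post.addHeader(Header.OZONE_ACLS, Header.OZONE_ACL_ADD + " user:bilbo:rw");
    System.out.println(post.getRequestLine()); // POST /vol1/bucket1 HTTP/1.1
  }
}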
- */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java deleted file mode 100644 index fc86dbb0576..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client.rest; - -/** - * Ozone REST interface. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/BucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/BucketInfo.java deleted file mode 100644 index 61d051de98b..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/BucketInfo.java +++ /dev/null @@ -1,248 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client.rest.response; - -import java.io.IOException; -import java.util.LinkedList; -import java.util.List; - -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConsts; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.google.common.base.Preconditions; - -/** - * BucketInfo class is used used for parsing json response - * when BucketInfo Call is made. - */ -public class BucketInfo implements Comparable { - - private static final ObjectReader READER = - new ObjectMapper().readerFor(BucketInfo.class); - - private String volumeName; - private String bucketName; - private String createdOn; - private List acls; - private OzoneConsts.Versioning versioning; - private StorageType storageType; - private String bekName; - - /** - * Constructor for BucketInfo. 
- * - * @param volumeName name of the volume - * @param bucketName name of the bucket - */ - public BucketInfo(String volumeName, String bucketName) { - this.volumeName = volumeName; - this.bucketName = bucketName; - } - - - /** - * Default constructor for BucketInfo. - */ - public BucketInfo() { - acls = new LinkedList<>(); - } - - /** - * Parse a JSON string into BucketInfo Object. - * - * @param jsonString Json String - * @return BucketInfo - * @throws IOException - */ - public static BucketInfo parse(String jsonString) throws IOException { - return READER.readValue(jsonString); - } - - /** - * Returns a List of ACLs set on the Bucket. - * - * @return List of Acl - */ - public List<OzoneAcl> getAcls() { - return acls; - } - - /** - * Sets ACLs. - * - * @param acls Acl list - */ - public void setAcls(List<OzoneAcl> acls) { - this.acls = acls; - } - - /** - * Returns Storage Type info. - * - * @return Storage Type of the bucket - */ - public StorageType getStorageType() { - return storageType; - } - - /** - * Sets the Storage Type. - * - * @param storageType Storage Type - */ - public void setStorageType(StorageType storageType) { - this.storageType = storageType; - } - - /** - * Returns versioning. - * - * @return versioning Enum - */ - public OzoneConsts.Versioning getVersioning() { - return versioning; - } - - /** - * Sets Versioning. - * - * @param versioning versioning value - */ - public void setVersioning(OzoneConsts.Versioning versioning) { - this.versioning = versioning; - } - - - /** - * Gets bucket Name. - * - * @return String - */ - public String getBucketName() { - return bucketName; - } - - /** - * Sets bucket Name. - * - * @param bucketName Name of the bucket - */ - public void setBucketName(String bucketName) { - this.bucketName = bucketName; - } - - /** - * Sets creation time of the bucket. - * - * @param creationTime Date String - */ - public void setCreatedOn(String creationTime) { - this.createdOn = creationTime; - } - - /** - * Returns creation time. - * - * @return creation time of bucket. - */ - public String getCreatedOn() { - return createdOn; - } - - /** - * Returns Volume Name. - * - * @return String volume name - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Sets the Volume Name of bucket. - * - * @param volumeName volumeName - */ - public void setVolumeName(String volumeName) { - this.volumeName = volumeName; - } - - /** - * Return bucket encryption key name. - * @return bucket encryption key name - */ - public String getEncryptionKeyName() { - return bekName; - } - - /** - * Sets the bucket encryption key name. - * @param name bucket encryption key name - */ - public void setEncryptionKeyName(String name) { - this.bekName = name; - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object. - * - * Please note : BucketInfo compare functions are used only within the - * context of a volume, hence volume name is purposefully ignored in - * compareTo, equal and hashcode functions of this class. - */ - @Override - public int compareTo(BucketInfo o) { - Preconditions.checkState(o.getVolumeName().equals(this.getVolumeName())); - return this.bucketName.compareTo(o.getBucketName()); - } -
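A sketch of JSON that BucketInfo.parse() above can bind; the property names mirror the bean fields, and the date string is sample data (this class stores it unparsed):

import org.apache.hadoop.ozone.client.rest.response.BucketInfo;

public final class BucketInfoParseSketch {
  public static void main(String[] args) throws Exception {
    String json = "{\"volumeName\":\"vol1\",\"bucketName\":\"bucket1\","
        + "\"createdOn\":\"Thu, 29 Aug 2019 10:00:00 GMT\","
        + "\"storageType\":\"DISK\",\"versioning\":\"ENABLED\"}";
    BucketInfo info = BucketInfo.parse(json);
    System.out.println(info.getVolumeName() + "/" + info.getBucketName()
        + " -> " + info.getStorageType());
  }
}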
- /** - * Checks if two BucketInfo objects are equal. - * @param o Object BucketInfo - * @return True or False - */ - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof BucketInfo)) { - return false; - } - - BucketInfo that = (BucketInfo) o; - Preconditions.checkState(that.getVolumeName().equals(this.getVolumeName())); - return bucketName.equals(that.bucketName); - - } - - /** - * Hash Code for this object. - * @return int - */ - @Override - public int hashCode() { - return bucketName.hashCode(); - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfo.java deleted file mode 100644 index 299826fa941..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfo.java +++ /dev/null @@ -1,236 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest.response; - -import java.io.IOException; - -import com.fasterxml.jackson.databind.ObjectMapper; - -import com.fasterxml.jackson.databind.ObjectReader; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdds.client.ReplicationType; - -/** - * KeyInfo class is used for parsing the json response - * when a KeyInfo call is made. - */ -public class KeyInfo implements Comparable<KeyInfo> { - - private static final ObjectReader READER = - new ObjectMapper().readerFor(KeyInfo.class); - - private long version; - private String md5hash; - private String createdOn; - private String modifiedOn; - private long size; - private String keyName; - private ReplicationType type; - - /** - * Return replication type of the key. - * - * @return replication type - */ - public ReplicationType getType() { - return type; - } - - /** - * Set replication type of the key. - * - * @param replicationType replication type of the key - */ - public void setType(ReplicationType replicationType) { - this.type = replicationType; - } - - /** - * When this key was created. - * - * @return Date String - */ - public String getCreatedOn() { - return createdOn; - } - - /** - * When this key was modified. - * - * @return Date String - */ - public String getModifiedOn() { - return modifiedOn; - } - - /** - * When this key was created. - * - * @param createdOn Date String - */ - public void setCreatedOn(String createdOn) { - this.createdOn = createdOn; - } - - /** - * When this key was modified. - * - * @param modifiedOn Date String - */ - public void setModifiedOn(String modifiedOn) { - this.modifiedOn = modifiedOn; - } - - /** - * Gets the Key name of this object.
- * - * @return String - */ - public String getKeyName() { - return keyName; - } - - /** - * Sets the Key name of this object. - * - * @param keyName String - */ - public void setKeyName(String keyName) { - this.keyName = keyName; - } - - /** - * Returns the MD5 Hash for the data of this key. - * - * @return String MD5 - */ - public String getMd5hash() { - return md5hash; - } - - /** - * Sets the MD5 value of this key. - * - * @param md5hash Md5 of this file - */ - public void setMd5hash(String md5hash) { - this.md5hash = md5hash; - } - - /** - * Number of bytes stored in the data part of this key. - * - * @return long size of the data file - */ - public long getSize() { - return size; - } - - /** - * Sets the size of the data part of this key. - * - * @param size Size in long - */ - public void setSize(long size) { - this.size = size; - } - - /** - * Version of this key. - * - * @return returns the version of this key. - */ - public long getVersion() { - return version; - } - - /** - * Sets the version of this key. - * - * @param version - Version String - */ - public void setVersion(long version) { - this.version = version; - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object. - * - * @param o the object to be compared. - * @return a negative integer, zero, or a positive integer as this object - * is less than, equal to, or greater than the specified object. - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. - */ - @Override - public int compareTo(KeyInfo o) { - if (this.keyName.compareTo(o.getKeyName()) != 0) { - return this.keyName.compareTo(o.getKeyName()); - } - - if (this.getVersion() == o.getVersion()) { - return 0; - } - if (this.getVersion() < o.getVersion()) { - return -1; - } - return 1; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - KeyInfo keyInfo = (KeyInfo) o; - - return new EqualsBuilder() - .append(version, keyInfo.version) - .append(keyName, keyInfo.keyName) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(version) - .append(keyName) - .toHashCode(); - } - - /** - * Parse a string to return KeyInfo Object. - * - * @param jsonString Json String - * @return keyInfo - * @throws IOException - */ - public static KeyInfo parse(String jsonString) throws IOException { - return READER.readValue(jsonString); - } - -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfoDetails.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfoDetails.java deleted file mode 100644 index b1532397162..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfoDetails.java +++ /dev/null @@ -1,118 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
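Similarly, a hedged sample of JSON accepted by KeyInfo.parse() above; field names follow the bean properties, and the values are invented:

import org.apache.hadoop.ozone.client.rest.response.KeyInfo;

public final class KeyInfoParseSketch {
  public static void main(String[] args) throws Exception {
    KeyInfo info = KeyInfo.parse(
        "{\"keyName\":\"dir1/file1\",\"size\":4096,\"version\":0,"
            + "\"type\":\"RATIS\","
            + "\"createdOn\":\"Thu, 29 Aug 2019 10:00:00 GMT\"}");
    System.out.println(info.getKeyName() + " (" + info.getSize() + " bytes)");
  }
}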
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest.response; - -import java.io.IOException; -import java.util.List; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.fs.FileEncryptionInfo; - -/** - * KeyInfoDetails class is used for parsing json response - * when KeyInfoDetails Call is made. - */ -public class KeyInfoDetails extends KeyInfo { - - private static final ObjectReader READER = - new ObjectMapper().readerFor(KeyInfoDetails.class); - - /** - * a list of Map which maps localID to ContainerID - * to specify replica locations. - */ - private List keyLocations; - - private FileEncryptionInfo feInfo; - - /** - * Constructor needed for json serialization. - */ - public KeyInfoDetails() { - } - - /** - * Set details of key location. - * - * @param locations - details of key location - */ - public void setKeyLocation(List locations) { - this.keyLocations = locations; - } - - /** - * Returns details of key location. - * - * @return volumeName - */ - public List getKeyLocations() { - return keyLocations; - } - - public void setFileEncryptionInfo(FileEncryptionInfo info) { - this.feInfo = info; - } - - public FileEncryptionInfo getFileEncryptionInfo() { - return feInfo; - } - - /** - * Parse a string to return KeyInfoDetails Object. - * - * @param jsonString Json String - * @return KeyInfoDetails - * @throws IOException - */ - public static KeyInfoDetails parse(String jsonString) throws IOException { - return READER.readValue(jsonString); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - KeyInfoDetails that = (KeyInfoDetails) o; - - return new EqualsBuilder() - .append(getVersion(), that.getVersion()) - .append(getKeyName(), that.getKeyName()) - .append(keyLocations, that.keyLocations) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(21, 33) - .append(getVersion()) - .append(getKeyName()) - .append(keyLocations) - .toHashCode(); - } -} - diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyLocation.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyLocation.java deleted file mode 100644 index e5f46980ab1..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyLocation.java +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest.response; - -/** - * KeyLocation class is used for parsing the json response - * when a KeyInfoDetails call is made. - */ -public class KeyLocation { - /** - * The container this key is stored in. - */ - private long containerID; - /** - * The block within the container where this key is stored. - */ - private long localID; - /** - * Data length of this key replica. - */ - private long length; - /** - * Offset of this key. - */ - private long offset; - - /** - * Empty constructor for Json serialization. - */ - public KeyLocation() { - - } - - /** - * Constructs KeyLocation. - */ - public KeyLocation(long containerID, long localID, - long length, long offset) { - this.containerID = containerID; - this.localID = localID; - this.length = length; - this.offset = offset; - } - - /** - * Returns the containerID of this Key. - */ - public long getContainerID() { - return containerID; - } - - /** - * Returns the localID of this Key. - */ - public long getLocalID() { - return localID; - } - - /** - * Returns the length of this Key. - */ - public long getLength() { - return length; - } - - /** - * Returns the offset of this Key. - */ - public long getOffset() { - return offset; - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeInfo.java deleted file mode 100644 index f98b56a6d32..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeInfo.java +++ /dev/null @@ -1,215 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest.response; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.client.OzoneQuota; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; - -/** - * VolumeInfo class is used for parsing the json response - * when a VolumeInfo call is made.
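Callers typically fold the KeyLocation list carried by KeyInfoDetails into aggregate figures; a small sketch using the constructor above, with all IDs and sizes invented:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.ozone.client.rest.response.KeyLocation;

public final class KeyLocationSketch {
  public static void main(String[] args) {
    // Arguments are containerID, localID, length, offset.
    List<KeyLocation> locations = Arrays.asList(
        new KeyLocation(1L, 100L, 4096L, 0L),
        new KeyLocation(1L, 101L, 1024L, 4096L));
    long totalBytes =
        locations.stream().mapToLong(KeyLocation::getLength).sum();
    System.out.println("key spans " + totalBytes + " bytes across "
        + locations.size() + " blocks");
  }
}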
- */ -@InterfaceAudience.Private -public class VolumeInfo implements Comparable { - - - private static final ObjectReader READER = - new ObjectMapper().readerFor(VolumeInfo.class); - - private VolumeOwner owner; - private OzoneQuota quota; - private String volumeName; - private String createdOn; - private String createdBy; - - - /** - * Constructor for VolumeInfo. - * - * @param volumeName - Name of the Volume - * @param createdOn _ Date String - * @param createdBy - Person who created it - */ - public VolumeInfo(String volumeName, String createdOn, - String createdBy) { - this.volumeName = volumeName; - this.createdOn = createdOn; - this.createdBy = createdBy; - } - - /** - * Constructor for VolumeInfo. - */ - public VolumeInfo() { - } - - /** - * gets the volume name. - * - * @return Volume Name - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Sets the volume name. - * - * @param volumeName Volume Name - */ - public void setVolumeName(String volumeName) { - this.volumeName = volumeName; - } - - - /** - * Returns the name of the person who created this volume. - * - * @return Name of Admin who created this - */ - public String getCreatedBy() { - return createdBy; - } - - /** - * Sets the user name of the person who created this volume. - * - * @param createdBy UserName - */ - public void setCreatedBy(String createdBy) { - this.createdBy = createdBy; - } - - /** - * Gets the date on which this volume was created. - * - * @return Date String - */ - public String getCreatedOn() { - return createdOn; - } - - /** - * Sets the date string. - * - * @param createdOn Date String - */ - public void setCreatedOn(String createdOn) { - this.createdOn = createdOn; - } - - /** - * Returns the owner info. - * - * @return OwnerInfo - */ - public VolumeOwner getOwner() { - return owner; - } - - /** - * Sets the owner. - * - * @param owner OwnerInfo - */ - public void setOwner(VolumeOwner owner) { - this.owner = owner; - } - - /** - * Returns the quota information on a volume. - * - * @return Quota - */ - public OzoneQuota getQuota() { - return quota; - } - - /** - * Sets the quota info. - * - * @param quota Quota Info - */ - public void setQuota(OzoneQuota quota) { - this.quota = quota; - } - - /** - * Comparable Interface. - * @param o VolumeInfo Object. - * @return Result of comparison - */ - @Override - public int compareTo(VolumeInfo o) { - return this.volumeName.compareTo(o.getVolumeName()); - } - - /** - * Returns VolumeInfo class from json string. - * - * @param data Json String - * - * @return VolumeInfo - * - * @throws IOException - */ - public static VolumeInfo parse(String data) throws IOException { - return READER.readValue(data); - } - - /** - * Indicates whether some other object is "equal to" this one. - * - * @param obj the reference object with which to compare. - * - * @return {@code true} if this object is the same as the obj - * argument; {@code false} otherwise. - */ - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - VolumeInfo otherInfo = (VolumeInfo) obj; - return otherInfo.getVolumeName().equals(this.getVolumeName()); - } - - /** - * Returns a hash code value for the object. This method is - * supported for the benefit of hash tables such as those provided by - * HashMap. - * @return a hash code value for this object. 
- * - * @see Object#equals(Object) - * @see System#identityHashCode - */ - @Override - public int hashCode() { - return getVolumeName().hashCode(); - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeOwner.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeOwner.java deleted file mode 100644 index d4dbad4cbf6..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeOwner.java +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rest.response; - -import org.apache.hadoop.classification.InterfaceAudience; - -import com.fasterxml.jackson.annotation.JsonInclude; - -/** - * Volume Owner represents the owner of a volume. - * - * This is a class instead of a string since we might need to extend this class - * to support other forms of authentication. - */ -@InterfaceAudience.Private -public class VolumeOwner { - @JsonInclude(JsonInclude.Include.NON_NULL) - private String name; - - /** - * Constructor for VolumeOwner. - * - * @param name name of the User - */ - public VolumeOwner(String name) { - this.name = name; - } - - /** - * Constructs Volume Owner. - */ - public VolumeOwner() { - name = null; - } - - /** - * Returns the user name. - * - * @return Name - */ - public String getName() { - return name; - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/package-info.java deleted file mode 100644 index 432b029b6fb..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.ozone.client.rest.response; - -/** - * This package contains class for ozone rest client library. 
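The @JsonInclude(JsonInclude.Include.NON_NULL) annotation on VolumeOwner above is what kept an unset owner out of the wire format. A small, self-contained sketch of that behavior, assuming Jackson 2.x; the sketch class name is illustrative:

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.ozone.client.rest.response.VolumeOwner;

public final class OwnerJsonSketch {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    // name set: serializes as {"name":"hadoop"}
    System.out.println(mapper.writeValueAsString(new VolumeOwner("hadoop")));
    // name null: NON_NULL drops the field, leaving {}
    System.out.println(mapper.writeValueAsString(new VolumeOwner()));
  }
}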
- */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java index fb496441aeb..dce4f8e20d2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java @@ -24,13 +24,11 @@ import com.fasterxml.jackson.databind.ObjectReader; import com.fasterxml.jackson.databind.ObjectWriter; import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.client.rest.response.BucketInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .ServicePort; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; -import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -165,27 +163,6 @@ public static ServiceInfo getFromProtobuf( serviceInfo.getServicePortsList()); } - /** - * Returns a JSON string of this object. - * - * @return String - json string - * @throws IOException - */ - public String toJsonString() throws IOException { - return WRITER.writeValueAsString(this); - } - - /** - * Parse a JSON string into ServiceInfo Object. - * - * @param jsonString Json String - * @return BucketInfo - * @throws IOException - */ - public static BucketInfo parse(String jsonString) throws IOException { - return READER.readValue(jsonString); - } - /** * Creates a new builder to build {@link ServiceInfo}. * @return {@link ServiceInfo.Builder} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java deleted file mode 100644 index 63860dad80f..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.handlers; - -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ozone.OzoneConsts; - -/** - * BucketArgs packages all bucket related arguments to - * file system calls. - */ -public class BucketArgs extends VolumeArgs { - private final String bucketName; - private OzoneConsts.Versioning versioning; - private StorageType storageType; - - /** - * Constructor for BucketArgs. 
- * - * @param volumeName - volumeName - * @param bucketName - bucket Name - * @param userArgs - userArgs - */ - public BucketArgs(String volumeName, String bucketName, UserArgs userArgs) { - super(volumeName, userArgs); - this.bucketName = bucketName; - this.versioning = OzoneConsts.Versioning.NOT_DEFINED; - this.storageType = null; - } - - - /** - * Constructor for BucketArgs. - * - * @param bucketName - bucket Name - * @param volumeArgs - volume Args - */ - public BucketArgs(String bucketName, VolumeArgs volumeArgs) { - super(volumeArgs); - this.bucketName = bucketName; - this.versioning = OzoneConsts.Versioning.NOT_DEFINED; - this.storageType = null; - } - - /** - * Constructor for BucketArgs. - * - * @param args - Bucket Args - */ - public BucketArgs(BucketArgs args) { - this(args.getBucketName(), args); - } - - /** - * Returns the Bucket Name. - * - * @return Bucket Name - */ - public String getBucketName() { - return bucketName; - } - - /** - * Returns Versioning Info. - * - * @return versioning - */ - public OzoneConsts.Versioning getVersioning() { - return versioning; - } - - - /** - * SetVersioning Info. - * - * @param versioning - Enum value - */ - public void setVersioning(OzoneConsts.Versioning versioning) { - this.versioning = versioning; - } - - /** - * returns the current Storage Class. - * - * @return Storage Class - */ - public StorageType getStorageType() { - return storageType; - } - - /** - * Sets the Storage Class. - * - * @param storageType Set Storage Class - */ - public void setStorageType(StorageType storageType) { - this.storageType = storageType; - } - - /** - * returns - Volume/bucketName. - * - * @return String - */ - @Override - public String getResourceName() { - return getVolumeName() + "/" + getBucketName(); - } - - /** - * Returns User/Volume name which is the parent of this - * bucket. - * - * @return String - */ - public String getParentName() { - return getUserName() + "/" + getVolumeName(); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyArgs.java deleted file mode 100644 index 48a4cb45136..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyArgs.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.handlers; - -/** - * Class that packages all key Arguments. - */ -public class KeyArgs extends BucketArgs { - private String key; - private String hash; - private long size; - - /** - * Constructor for Key Args. 
- * - * @param volumeName - Volume Name - * @param bucketName - Bucket Name - * @param objectName - Key - */ - public KeyArgs(String volumeName, String bucketName, - String objectName, UserArgs args) { - super(volumeName, bucketName, args); - this.key = objectName; - } - - /** - * Constructor for Key Args. - * - * @param objectName - Key - * @param args - Bucket Args - */ - public KeyArgs(String objectName, BucketArgs args) { - super(args); - this.key = objectName; - } - - /** - * Get Key Name. - * - * @return String - */ - public String getKeyName() { - return this.key; - } - - /** - * Computed File hash. - * - * @return String - */ - public String getHash() { - return hash; - } - - /** - * Sets the hash String. - * - * @param hash String - */ - public void setHash(String hash) { - this.hash = hash; - } - - /** - * Returns the file size. - * - * @return long - file size - */ - public long getSize() { - return size; - } - - /** - * Set Size. - * - * @param size Size of the file - */ - public void setSize(long size) { - this.size = size; - } - - /** - * Returns the name of the resource. - * - * @return String - */ - @Override - public String getResourceName() { - return super.getResourceName() + "/" + getKeyName(); - } - - /** - * Parent name of this resource. - * - * @return String. - */ - @Override - public String getParentName() { - return super.getResourceName(); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/ListArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/ListArgs.java deleted file mode 100644 index 49ca4a4f7e3..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/ListArgs.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.handlers; - -/** - * Supports listing keys with pagination. - */ -public class ListArgs { - private String prevKey; - private String prefix; - private int maxKeys; - private boolean rootScan; - private T args; - - /** - * Constructor for ListArgs. - * - * @param args - BucketArgs - * @param prefix Prefix to start Query from - * @param maxKeys Max result set - * @param prevKey - Page token - */ - public ListArgs(T args, String prefix, int maxKeys, - String prevKey) { - setArgs(args); - setPrefix(prefix); - setMaxKeys(maxKeys); - setPrevKey(prevKey); - } - - /** - * Copy Constructor for ListArgs. - * - * @param args - List Args - */ - public ListArgs(T args, ListArgs listArgs) { - this(args, listArgs.getPrefix(), listArgs.getMaxKeys(), - listArgs.getPrevKey()); - } - - /** - * Returns page token. - * - * @return String - */ - public String getPrevKey() { - return prevKey; - } - - /** - * Sets page token. 
- * - * @param prevKey - Page token - */ - public void setPrevKey(String prevKey) { - this.prevKey = prevKey; - } - - /** - * Gets max keys. - * - * @return int - */ - public int getMaxKeys() { - return maxKeys; - } - - /** - * Sets max keys. - * - * @param maxKeys - Maximum keys to return - */ - public void setMaxKeys(int maxKeys) { - this.maxKeys = maxKeys; - } - - /** - * Gets prefix. - * - * @return String - */ - public String getPrefix() { - return prefix; - } - - /** - * Sets prefix. - * - * @param prefix - The prefix that we are looking for - */ - public void setPrefix(String prefix) { - this.prefix = prefix; - } - - /** - * Gets args. - * @return T - */ - public T getArgs() { - return args; - } - - /** - * Sets args. - * @param args T - */ - public void setArgs(T args) { - this.args = args; - } - - /** - * Checks if we are doing a rootScan. - * @return - RootScan. - */ - public boolean isRootScan() { - return rootScan; - } - - /** - * Sets the RootScan property. - * @param rootScan - Boolean. - */ - public void setRootScan(boolean rootScan) { - this.rootScan = rootScan; - } - -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/UserArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/UserArgs.java deleted file mode 100644 index 8a75928a1c6..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/UserArgs.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.handlers; - -import org.apache.hadoop.classification.InterfaceAudience; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Request; -import javax.ws.rs.core.UriInfo; -import java.util.Arrays; - -/** - * UserArgs is used to package caller info - * and pass it down to file system. - */ -@InterfaceAudience.Private -public class UserArgs { - private String userName; - private final String requestID; - private final String hostName; - private final UriInfo uri; - private final Request request; - private final HttpHeaders headers; - private String[] groups; - - - /** - * Constructs user args. - * - * @param userName - User name - * @param requestID - Request ID - * @param hostName - Host Name - * @param req - Request - * @param info - Uri Info - * @param httpHeaders - http headers - */ - public UserArgs(String userName, String requestID, String hostName, - Request req, UriInfo info, HttpHeaders httpHeaders) { - this.hostName = hostName; - this.userName = userName; - this.requestID = requestID; - this.uri = info; - this.request = req; - this.headers = httpHeaders; - } - - /** - * Constructs user args when we don't know the user name yet. 
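The prevKey/maxKeys pair in ListArgs above is what made listing restartable: each call returns at most maxKeys entries, and the last key of one page is fed back as the prevKey of the next. A sketch of that loop, under the assumption of a hypothetical KeyLister callback and Page result type, neither of which is part of the removed API:

import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.ListArgs;

public final class PagedScanSketch {
  /** Hypothetical result/lister pair, purely for illustration. */
  interface Page { String lastKey(); boolean hasMore(); }
  interface KeyLister { Page list(ListArgs args); }

  static void scanAll(KeyLister lister, BucketArgs bucket) {
    String token = null;               // no prevKey on the first page
    boolean more = true;
    while (more) {
      ListArgs pageArgs = new ListArgs(bucket, "photos/", 1000, token);
      Page page = lister.list(pageArgs);
      token = page.lastKey();          // next call resumes after this key
      more = page.hasMore();
    }
  }
}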
- * - * @param requestID _ Request ID - * @param hostName - Host Name - * @param req - Request - * @param info - UriInfo - * @param httpHeaders - http headers - */ - public UserArgs(String requestID, String hostName, Request req, UriInfo info, - HttpHeaders httpHeaders) { - this.hostName = hostName; - this.requestID = requestID; - this.uri = info; - this.request = req; - this.headers = httpHeaders; - } - - /** - * Returns hostname. - * - * @return String - */ - public String getHostName() { - return hostName; - } - - /** - * Returns RequestID. - * - * @return Long - */ - public String getRequestID() { - return requestID; - } - - /** - * Returns User Name. - * - * @return String - */ - public String getUserName() { - return userName; - } - - /** - * Sets the user name. - * - * @param userName Name of the user - */ - public void setUserName(String userName) { - this.userName = userName; - } - - /** - * Returns list of groups. - * - * @return String[] - */ - public String[] getGroups() { - return groups != null ? - Arrays.copyOf(groups, groups.length) : null; - } - - /** - * Sets the group list. - * - * @param groups list of groups - */ - public void setGroups(String[] groups) { - if (groups != null) { - this.groups = Arrays.copyOf(groups, groups.length); - } - } - - /** - * Returns the resource Name. - * - * @return String Resource. - */ - public String getResourceName() { - return getUserName(); - } - - /** - * Returns Http Headers for this call. - * - * @return httpHeaders - */ - public HttpHeaders getHeaders() { - return headers; - } - - /** - * Returns Request Object. - * - * @return Request - */ - public Request getRequest() { - return request; - } - - /** - * Returns UriInfo. - * - * @return UriInfo - */ - public UriInfo getUri() { - return uri; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeArgs.java deleted file mode 100644 index 6bafac1c917..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeArgs.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.handlers; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.web.request.OzoneQuota; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Request; -import javax.ws.rs.core.UriInfo; - -/** - * VolumeArgs is used to package all volume - * related arguments in the call to underlying - * file system. 
- */ -@InterfaceAudience.Private -public class VolumeArgs extends UserArgs { - private String adminName; - private final String volumeName; - private OzoneQuota quota; - - /** - * Returns Quota Information. - * - * @return Quota - */ - public OzoneQuota getQuota() { - return quota; - } - - /** - * Returns volume name. - * - * @return String - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Constructs volume Args. - * - * @param userName - User name - * @param volumeName - volume Name - * @param requestID - Request ID - * @param hostName - Host Name - * @param request - Http Request - * @param info - URI info - * @param headers - http headers - * @param groups - list of groups allowed to access the volume - */ - @SuppressWarnings("parameternumber") - public VolumeArgs(String userName, String volumeName, String requestID, - String hostName, Request request, UriInfo info, - HttpHeaders headers, String[] groups) { - super(userName, requestID, hostName, request, info, headers); - super.setGroups(groups); - this.volumeName = volumeName; - } - - /** - * Constructs volume Args. - * - * @param volumeName - volume Name - * @param userArgs - userArgs - */ - public VolumeArgs(String volumeName, UserArgs userArgs) { - this(userArgs.getUserName(), volumeName, userArgs.getRequestID(), - userArgs.getHostName(), userArgs.getRequest(), userArgs.getUri(), - userArgs.getHeaders(), userArgs.getGroups()); - } - - /** - * Creates VolumeArgs from another VolumeArgs. - */ - public VolumeArgs(VolumeArgs volArgs) { - this(volArgs.getVolumeName(), volArgs); - } - - /** - * Sets Quota information. - * - * @param quota - Quota String - * @throws IllegalArgumentException - */ - public void setQuota(String quota) throws IllegalArgumentException { - this.quota = OzoneQuota.parseQuota(quota); - } - - /** - * Sets quota information. - * - * @param quota - OzoneQuota - */ - public void setQuota(OzoneQuota quota) { - this.quota = quota; - } - - /** - * Gets admin Name. - * - * @return - Admin Name - */ - public String getAdminName() { - return adminName; - } - - /** - * Sets Admin Name. - * - * @param adminName - Admin Name - */ - public void setAdminName(String adminName) { - this.adminName = adminName; - } - - /** - * Returns UserName/VolumeName. - * - * @return String - */ - @Override - public String getResourceName() { - return super.getResourceName() + "/" + getVolumeName(); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/package-info.java deleted file mode 100644 index a66a773c389..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.handlers; - -/** - * REST handler value classes. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/OzoneQuota.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/OzoneQuota.java deleted file mode 100644 index 9619ebdb7cd..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/OzoneQuota.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.request; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.rest.headers.Header; -import com.fasterxml.jackson.annotation.JsonIgnore; - -/** - * represents an OzoneQuota Object that can be applied to - * a storage volume. - */ -@InterfaceAudience.Private -public class OzoneQuota { - - private Units unit; - private int size; - - /** Quota Units.*/ - public enum Units {UNDEFINED, BYTES, MB, GB, TB} - - /** - * Returns size. - * - * @return int - */ - public int getSize() { - return size; - } - - /** - * Returns Units. - * - * @return Unit in MB, GB or TB - */ - public Units getUnit() { - return unit; - } - - /** - * Constructs a default Quota object. - */ - public OzoneQuota() { - this.size = 0; - this.unit = Units.UNDEFINED; - } - - /** - * Constructor for Ozone Quota. - * - * @param size - Integer Size - * @param unit MB, GB or TB - */ - public OzoneQuota(int size, Units unit) { - this.size = size; - this.unit = unit; - } - - /** - * Formats a quota as a string. - * - * @param quota the quota to format - * @return string representation of quota - */ - public static String formatQuota(OzoneQuota quota) { - return String.valueOf(quota.size) + quota.unit; - } - - /** - * Parses a user provided string and returns the - * Quota Object. 
- * - * @param quotaString Quota String - * - * @return OzoneQuota object - * - * @throws IllegalArgumentException - */ - public static OzoneQuota parseQuota(String quotaString) - throws IllegalArgumentException { - - if ((quotaString == null) || (quotaString.isEmpty())) { - throw new IllegalArgumentException( - "Quota string cannot be null or empty."); - } - - if (isRemove(quotaString)) { - throw new IllegalArgumentException("Remove is invalid in this context."); - } - - String uppercase = quotaString.toUpperCase().replaceAll("\\s+", ""); - String size = ""; - int nSize; - Units currUnit = Units.MB; - Boolean found = false; - if (uppercase.endsWith(Header.OZONE_QUOTA_MB)) { - size = uppercase - .substring(0, uppercase.length() - Header.OZONE_QUOTA_MB.length()); - currUnit = Units.MB; - found = true; - } - - if (uppercase.endsWith(Header.OZONE_QUOTA_GB)) { - size = uppercase - .substring(0, uppercase.length() - Header.OZONE_QUOTA_GB.length()); - currUnit = Units.GB; - found = true; - } - - if (uppercase.endsWith(Header.OZONE_QUOTA_TB)) { - size = uppercase - .substring(0, uppercase.length() - Header.OZONE_QUOTA_TB.length()); - currUnit = Units.TB; - found = true; - } - - if (uppercase.endsWith(Header.OZONE_QUOTA_BYTES)) { - size = uppercase - .substring(0, uppercase.length() - Header.OZONE_QUOTA_BYTES.length()); - currUnit = Units.BYTES; - found = true; - } - - if (!found) { - throw new IllegalArgumentException( - "Quota unit not recognized. Supported values are BYTES, MB, GB and " + - "TB."); - } - - nSize = Integer.parseInt(size); - if (nSize < 0) { - throw new IllegalArgumentException("Quota cannot be negative."); - } - - return new OzoneQuota(nSize, currUnit); - } - - - /** - * Checks if Quota String is just as remove operation. - * - * @param quotaString User provided quota String - * - * @return True if it is Remove, false otherwise - */ - public static boolean isRemove(String quotaString) { - - return (quotaString != null) && - (quotaString.compareToIgnoreCase(Header.OZONE_QUOTA_REMOVE) == 0); - } - - /** - * Returns size in Bytes or -1 if there is no Quota. - */ - @JsonIgnore - public long sizeInBytes() { - switch (this.unit) { - case BYTES: - return this.getSize(); - case MB: - return this.getSize() * OzoneConsts.MB; - case GB: - return this.getSize() * OzoneConsts.GB; - case TB: - return this.getSize() * OzoneConsts.TB; - case UNDEFINED: - default: - return -1; - } - } - - /** - * Returns OzoneQuota corresponding to size in bytes. 
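Taken together, parseQuota, sizeInBytes and getOzoneQuota round-trip between the user-facing quota strings and raw byte counts. A short usage sketch; the values are made up, and it assumes the removed classes are still on the classpath:

import org.apache.hadoop.ozone.web.request.OzoneQuota;

public final class QuotaSketch {
  public static void main(String[] args) {
    // Parsing is case-insensitive and whitespace-tolerant, per parseQuota.
    OzoneQuota quota = OzoneQuota.parseQuota("100GB");
    System.out.println(quota.getSize());               // 100
    System.out.println(quota.getUnit());               // GB
    System.out.println(quota.sizeInBytes());           // 100 GB in bytes
    System.out.println(OzoneQuota.formatQuota(quota)); // 100GB

    try {
      OzoneQuota.parseQuota("100PB");                  // unsupported unit
    } catch (IllegalArgumentException expected) {
      System.out.println(expected.getMessage());
    }
  }
}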
- * - * @param sizeInBytes size in bytes to be converted - * - * @return OzoneQuota object - */ - public static OzoneQuota getOzoneQuota(long sizeInBytes) { - long size; - Units unit; - if (sizeInBytes % OzoneConsts.TB == 0) { - size = sizeInBytes / OzoneConsts.TB; - unit = Units.TB; - } else if (sizeInBytes % OzoneConsts.GB == 0) { - size = sizeInBytes / OzoneConsts.GB; - unit = Units.GB; - } else if (sizeInBytes % OzoneConsts.MB == 0) { - size = sizeInBytes / OzoneConsts.MB; - unit = Units.MB; - } else { - size = sizeInBytes; - unit = Units.BYTES; - } - return new OzoneQuota((int)size, unit); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/package-info.java deleted file mode 100644 index 4fbc18ff6e4..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Request value classes used by the Ozone REST interface. - */ -package org.apache.hadoop.ozone.web.request; \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java deleted file mode 100644 index b431f19c19a..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java +++ /dev/null @@ -1,335 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package org.apache.hadoop.ozone.web.response; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonFilter; -import com.fasterxml.jackson.annotation.PropertyAccessor; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.fasterxml.jackson.databind.ser.FilterProvider; -import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; -import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; -import com.google.common.base.Preconditions; - -/** - * BucketInfo class, this is used as response class to send - * Json info about a bucket back to a client. - */ -public class BucketInfo implements Comparable { - static final String BUCKET_INFO = "BUCKET_INFO_FILTER"; - private static final ObjectReader READER = - new ObjectMapper().readerFor(BucketInfo.class); - private static final ObjectWriter WRITER; - - static { - ObjectMapper mapper = new ObjectMapper(); - String[] ignorableFieldNames = {"bytesUsed", "keyCount"}; - - FilterProvider filters = new SimpleFilterProvider().addFilter(BUCKET_INFO, - SimpleBeanPropertyFilter.serializeAllExcept(ignorableFieldNames)); - mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY); - mapper.addMixIn(Object.class, MixIn.class); - - mapper.setFilterProvider(filters); - WRITER = mapper.writerWithDefaultPrettyPrinter(); - } - - private String volumeName; - private String bucketName; - private String createdOn; - private List acls; - private OzoneConsts.Versioning versioning; - private StorageType storageType; - private long bytesUsed; - private long keyCount; - private String encryptionKeyName; - - /** - * Constructor for BucketInfo. - * - * @param volumeName - * @param bucketName - */ - public BucketInfo(String volumeName, String bucketName) { - this.volumeName = volumeName; - this.bucketName = bucketName; - } - - - /** - * Default constructor for BucketInfo. - */ - public BucketInfo() { - acls = new ArrayList<>(); - } - - /** - * Parse a JSON string into BucketInfo Object. - * - * @param jsonString - Json String - * - * @return - BucketInfo - * - * @throws IOException - */ - public static BucketInfo parse(String jsonString) throws IOException { - return READER.readValue(jsonString); - } - - /** - * Returns a List of ACL on the Bucket. - * - * @return List of Acls - */ - public List getAcls() { - return acls; - } - - /** - * Sets ACls. - * - * @param acls - Acls list - */ - public void setAcls(List acls) { - this.acls = acls; - } - - /** - * Returns Storage Type info. - * - * @return Storage Type of the bucket - */ - public StorageType getStorageType() { - return storageType; - } - - /** - * Sets the Storage Type. - * - * @param storageType - Storage Type - */ - public void setStorageType(StorageType storageType) { - this.storageType = storageType; - } - - /** - * Returns versioning. - * - * @return versioning Enum - */ - public OzoneConsts.Versioning getVersioning() { - return versioning; - } - - /** - * Sets Versioning. 
- * - * @param versioning - */ - public void setVersioning(OzoneConsts.Versioning versioning) { - this.versioning = versioning; - } - - - /** - * Gets bucket Name. - * - * @return String - */ - public String getBucketName() { - return bucketName; - } - - /** - * Sets bucket Name. - * - * @param bucketName - Name of the bucket - */ - public void setBucketName(String bucketName) { - this.bucketName = bucketName; - } - - /** - * Sets creation time of the bucket. - * - * @param creationTime - Date String - */ - public void setCreatedOn(String creationTime) { - this.createdOn = creationTime; - } - - /** - * Returns creation time. - * - * @return creation time of bucket. - */ - public String getCreatedOn() { - return createdOn; - } - - - public void setEncryptionKeyName(String encryptionKeyName) { - this.encryptionKeyName = encryptionKeyName; - } - - public String getEncryptionKeyName() { - return encryptionKeyName; - } - - /** - * Returns a JSON string of this object. - * After stripping out bytesUsed and keyCount - * - * @return String - */ - public String toJsonString() throws IOException { - return WRITER.writeValueAsString(this); - } - - /** - * Returns the Object as a Json String. - * - * The reason why both toJSONString exists and toDBString exists - * is because toJSONString supports an external facing contract with - * REST clients. However server internally would want to add more - * fields to this class. The distinction helps in serializing all - * fields vs. only fields that are part of REST protocol. - */ - public String toDBString() throws IOException { - return JsonUtils.toJsonString(this); - } - - /** - * Returns Volume Name. - * - * @return String volume name - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Sets the Volume Name of the bucket. - * - * @param volumeName - volumeName - */ - public void setVolumeName(String volumeName) { - this.volumeName = volumeName; - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object. - * - * Please note : BucketInfo compare functions are used only within the - * context of a volume, hence volume name is purposefully ignored in - * compareTo, equal and hashcode functions of this class. - */ - @Override - public int compareTo(BucketInfo o) { - Preconditions.checkState(o.getVolumeName().equals(this.getVolumeName())); - return this.bucketName.compareTo(o.getBucketName()); - } - - /** - * Checks if two bucketInfo's are equal. - * @param o Object BucketInfo - * @return True or False - */ - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof BucketInfo)) { - return false; - } - - BucketInfo that = (BucketInfo) o; - Preconditions.checkState(that.getVolumeName().equals(this.getVolumeName())); - return bucketName.equals(that.bucketName); - - } - - /** - * Hash Code for this object. - * @return int - */ - @Override - public int hashCode() { - return bucketName.hashCode(); - } - - /** - * Get the number of bytes used by this bucket. - * - * @return long - */ - public long getBytesUsed() { - return bytesUsed; - } - - /** - * Set bytes Used. - * - * @param bytesUsed - bytesUsed - */ - public void setBytesUsed(long bytesUsed) { - this.bytesUsed = bytesUsed; - } - - /** - * Get Key Count inside this bucket. 
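The toJsonString()/toDBString() split in BucketInfo above rests on Jackson's property filters: one writer is configured to serialize everything except the internal fields, while the full-view path serializes them all. A self-contained sketch of that technique, assuming Jackson 2.x; the Stats class and filter id are illustrative, not part of this patch:

import com.fasterxml.jackson.annotation.JsonFilter;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;

public final class FilterSketch {
  @JsonFilter("STATS_FILTER")
  static class Stats {
    public String name = "bucket1";
    public long bytesUsed = 42;   // internal-only field
  }

  public static void main(String[] args) throws Exception {
    ObjectWriter restWriter = new ObjectMapper().writer(
        new SimpleFilterProvider().addFilter("STATS_FILTER",
            SimpleBeanPropertyFilter.serializeAllExcept("bytesUsed")));
    // {"name":"bucket1"} : bytesUsed is stripped, as in toJsonString()
    System.out.println(restWriter.writeValueAsString(new Stats()));
    ObjectWriter dbWriter = new ObjectMapper().writer(
        new SimpleFilterProvider().addFilter("STATS_FILTER",
            SimpleBeanPropertyFilter.serializeAll()));
    // {"name":"bucket1","bytesUsed":42} : full view, as in toDBString()
    System.out.println(dbWriter.writeValueAsString(new Stats()));
  }
}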
- * - * @return - KeyCount - */ - public long getKeyCount() { - return keyCount; - } - - /** - * Set Key Count inside this bucket. - * - * @param keyCount - Sets the Key Count - */ - public void setKeyCount(long keyCount) { - this.keyCount = keyCount; - } - - /** - * This class allows us to create custom filters - * for the Json serialization. - */ - @JsonFilter(BUCKET_INFO) - static class MixIn { - - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java deleted file mode 100644 index f4801945e40..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java +++ /dev/null @@ -1,311 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.response; - -import java.io.IOException; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonFilter; -import com.fasterxml.jackson.annotation.PropertyAccessor; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.fasterxml.jackson.databind.ser.FilterProvider; -import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; -import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; - -/** - * Represents an Ozone key Object. - */ -public class KeyInfo implements Comparable { - static final String OBJECT_INFO = "OBJECT_INFO_FILTER"; - - private static final ObjectReader READER = - new ObjectMapper().readerFor(KeyInfo.class); - private static final ObjectWriter WRITER; - - static { - ObjectMapper mapper = new ObjectMapper(); - String[] ignorableFieldNames = {"dataFileName"}; - - FilterProvider filters = new SimpleFilterProvider() - .addFilter(OBJECT_INFO, SimpleBeanPropertyFilter - .serializeAllExcept(ignorableFieldNames)); - mapper.setVisibility(PropertyAccessor.FIELD, - JsonAutoDetect.Visibility.ANY); - mapper.addMixIn(Object.class, MixIn.class); - - mapper.setFilterProvider(filters); - WRITER = mapper.writerWithDefaultPrettyPrinter(); - } - - /** - * This class allows us to create custom filters - * for the Json serialization. 
- */ - @JsonFilter(OBJECT_INFO) - class MixIn { - - } - private long version; - private String md5hash; - private String createdOn; - private String modifiedOn; - private long size; - private String keyName; - - private String dataFileName; - - private ReplicationType type; - - /** - * Return replication type of the key. - * - * @return replication type - */ - public ReplicationType getType() { - return type; - } - - /** - * Set replication type of the key. - * - * @param replicationType - */ - public void setType(ReplicationType replicationType) { - this.type = replicationType; - } - - /** - * When this key was created. - * - * @return Date String - */ - public String getCreatedOn() { - return createdOn; - } - - /** - * When this key was modified. - * - * @return Date String - */ - public String getModifiedOn() { - return modifiedOn; - } - - /** - * When this key was created. - * - * @param createdOn - Date String - */ - public void setCreatedOn(String createdOn) { - this.createdOn = createdOn; - } - - /** - * When this key was modified. - * - * @param modifiedOn - Date String - */ - public void setModifiedOn(String modifiedOn) { - this.modifiedOn = modifiedOn; - } - - /** - * Full path to where the actual data for this key is stored. - * - * @return String - */ - public String getDataFileName() { - return dataFileName; - } - - /** - * Sets up where the file path is stored. - * - * @param dataFileName - Data File Name - */ - public void setDataFileName(String dataFileName) { - this.dataFileName = dataFileName; - } - - /** - * Gets the Keyname of this object. - * - * @return String - */ - public String getKeyName() { - return keyName; - } - - /** - * Sets the Key name of this object. - * - * @param keyName - String - */ - public void setKeyName(String keyName) { - this.keyName = keyName; - } - - /** - * Returns the MD5 Hash for the data of this key. - * - * @return String MD5 - */ - public String getMd5hash() { - return md5hash; - } - - /** - * Sets the MD5 of this file. - * - * @param md5hash - Md5 of this file - */ - public void setMd5hash(String md5hash) { - this.md5hash = md5hash; - } - - /** - * Number of bytes stored in the data part of this key. - * - * @return long size of the data file - */ - public long getSize() { - return size; - } - - /** - * Sets the size of the Data part of this key. - * - * @param size - Size in long - */ - public void setSize(long size) { - this.size = size; - } - - /** - * Version of this key. - * - * @return - returns the version of this key. - */ - public long getVersion() { - return version; - } - - /** - * Sets the version of this key. - * - * @param version - Version String - */ - public void setVersion(long version) { - this.version = version; - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object. - * - * @param o the object to be compared. - * - * @return a negative integer, zero, or a positive integer as this object - * is less than, equal to, or greater than the specified object. - * - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. 
- */ - @Override - public int compareTo(KeyInfo o) { - if (this.keyName.compareTo(o.getKeyName()) != 0) { - return this.keyName.compareTo(o.getKeyName()); - } - - if (this.getVersion() == o.getVersion()) { - return 0; - } - if (this.getVersion() < o.getVersion()) { - return -1; - } - return 1; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - KeyInfo keyInfo = (KeyInfo) o; - - return new EqualsBuilder() - .append(version, keyInfo.version) - .append(keyName, keyInfo.keyName) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(version) - .append(keyName) - .toHashCode(); - } - - /** - * Parse a string to return a KeyInfo object. - * - * @param jsonString - Json String - * - * @return - KeyInfo - * - * @throws IOException - */ - public static KeyInfo parse(String jsonString) throws IOException { - return READER.readValue(jsonString); - } - - - /** - * Returns a JSON string of this object. - * After stripping out the dataFileName field - * - * @return String - */ - public String toJsonString() throws IOException { - return WRITER.writeValueAsString(this); - } - - /** - * Returns the Object as a Json String. - */ - public String toDBString() throws IOException { - return JsonUtils.toJsonString(this); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfoDetails.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfoDetails.java deleted file mode 100644 index 7f2ba098d79..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfoDetails.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.response; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; - -import java.util.List; - -/** - * Represents an Ozone key Object with detailed location information. - */ -public class KeyInfoDetails extends KeyInfo { - /** - * A list of KeyLocation objects mapping localID to containerID - * to specify replica locations. - */ - private List<KeyLocation> keyLocations; - - /** - * Set details of key location. - * - * @param keyLocations - details of key location - */ - public void setKeyLocations(List<KeyLocation> keyLocations) { - this.keyLocations = keyLocations; - } - - /** - * Returns details of key location.
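KeyInfo.compareTo above orders keys by name first and version second, which is what ListKeys.sort() later in this patch relies on. A quick illustration of that ordering; the key names and versions are made up:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.ozone.web.response.KeyInfo;

public final class KeySortSketch {
  public static void main(String[] args) {
    List<KeyInfo> keys = new ArrayList<>();
    KeyInfo a = new KeyInfo(); a.setKeyName("a.txt"); a.setVersion(2);
    KeyInfo b = new KeyInfo(); b.setKeyName("a.txt"); b.setVersion(1);
    KeyInfo c = new KeyInfo(); c.setKeyName("b.txt"); c.setVersion(0);
    Collections.addAll(keys, a, b, c);
    Collections.sort(keys);   // yields: a.txt v1, a.txt v2, b.txt v0
    for (KeyInfo k : keys) {
      System.out.println(k.getKeyName() + " v" + k.getVersion());
    }
  }
}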
- * - * @return keyLocations - */ - public List<KeyLocation> getKeyLocations() { - return keyLocations; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - KeyInfoDetails that = (KeyInfoDetails) o; - - return new EqualsBuilder() - .append(getVersion(), that.getVersion()) - .append(getKeyName(), that.getKeyName()) - .append(keyLocations, that.getKeyLocations()) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(getVersion()) - .append(getKeyName()) - .append(keyLocations) - .toHashCode(); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyLocation.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyLocation.java deleted file mode 100644 index d03eff74753..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyLocation.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.response; - -/** - * KeyLocation class is used for parsing json response - * when KeyInfoDetails Call is made. - */ -public class KeyLocation { - /** - * The container in which this key is stored. - */ - private final long containerID; - /** - * The block inside the container in which this key is stored. - */ - private final long localID; - /** - * Data length of this key replica. - */ - private final long length; - /** - * Offset of this key. - */ - private final long offset; - - /** - * Constructs KeyLocation. - */ - public KeyLocation(long containerID, long localID, - long length, long offset) { - this.containerID = containerID; - this.localID = localID; - this.length = length; - this.offset = offset; - } - - /** - * Returns the containerID of this Key. - */ - public long getContainerID() { - return containerID; - } - - /** - * Returns the localID of this Key. - */ - public long getLocalID() { - return localID; - } - - /** - * Returns the length of this Key. - */ - public long getLength() { - return length; - } - - /** - * Returns the offset of this Key. - */ - public long getOffset() { - return offset; - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java deleted file mode 100644 index bc4e65be0df..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.response; - - -import java.io.IOException; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; - -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonFilter; -import com.fasterxml.jackson.annotation.PropertyAccessor; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.fasterxml.jackson.databind.ser.FilterProvider; -import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; -import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; - -/** - * List Bucket is the response for the ListBucket Query. - */ -public class ListBuckets { - static final String BUCKET_LIST = "BUCKET_LIST_FILTER"; - private static final ObjectReader READER = - new ObjectMapper().readerFor(ListBuckets.class); - private static final ObjectWriter WRITER; - - static { - ObjectMapper mapper = new ObjectMapper(); - String[] ignorableFieldNames = {"dataFileName"}; - - FilterProvider filters = new SimpleFilterProvider() - .addFilter(BUCKET_LIST, SimpleBeanPropertyFilter - .serializeAllExcept(ignorableFieldNames)); - mapper.setVisibility(PropertyAccessor.FIELD, - JsonAutoDetect.Visibility.ANY); - mapper.addMixIn(Object.class, MixIn.class); - - mapper.setFilterProvider(filters); - WRITER = mapper.writerWithDefaultPrettyPrinter(); - } - - private List buckets; - - /** - * Constructor for ListBuckets. - * @param buckets - List of buckets owned by this user - */ - public ListBuckets(List buckets) { - this.buckets = buckets; - - } - - /** - * Constructor for ListBuckets. - */ - public ListBuckets() { - this.buckets = new LinkedList(); - } - - /** - * Parses a String to return ListBuckets object. - * - * @param data - Json String - * - * @return - ListBuckets - * - * @throws IOException - */ - public static ListBuckets parse(String data) throws IOException { - return READER.readValue(data); - } - - /** - * Returns a list of Buckets. - * - * @return Bucket list - */ - public List getBuckets() { - return buckets; - } - - /** - * Sets the list of buckets owned by this user. - * - * @param buckets - List of Buckets - */ - public void setBuckets(List buckets) { - this.buckets = buckets; - } - - - /** - * Returns a JSON string of this object. - * After stripping out bytesUsed and keyCount - * - * @return String - */ - public String toJsonString() throws IOException { - return WRITER.writeValueAsString(this); - } - - /** - * Returns the Object as a Json String. - */ - public String toDBString() throws IOException { - return JsonUtils.toJsonString(this); - } - - /** - * Sorts the buckets based on bucketName. 
- * This is useful when we return the list of buckets. - */ - public void sort() { - Collections.sort(buckets); - } - - /** - * Add a new bucket to the list of buckets. - * @param bucketInfo - bucket Info - */ - public void addBucket(BucketInfo bucketInfo) { - this.buckets.add(bucketInfo); - } - - /** - * This class allows us to create custom filters - * for the Json serialization. - */ - @JsonFilter(BUCKET_LIST) - class MixIn { - - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java deleted file mode 100644 index 9dc77d2c234..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.response; - -import java.io.IOException; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; - -import org.apache.hadoop.ozone.web.handlers.BucketArgs; -import org.apache.hadoop.ozone.web.handlers.ListArgs; -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonFilter; -import com.fasterxml.jackson.annotation.PropertyAccessor; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.fasterxml.jackson.databind.ser.FilterProvider; -import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; -import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; -import com.google.common.base.Preconditions; - -/** - * This class represents the list of keys (Objects) in a bucket. - */ -public class ListKeys { - static final String OBJECT_LIST = "OBJECT_LIST_FILTER"; - - private static final ObjectReader READER = - new ObjectMapper().readerFor(ListKeys.class); - private static final ObjectWriter WRITER; - - static { - ObjectMapper mapper = new ObjectMapper(); - String[] ignorableFieldNames = {"dataFileName"}; - - FilterProvider filters = new SimpleFilterProvider() - .addFilter(OBJECT_LIST, SimpleBeanPropertyFilter - .serializeAllExcept(ignorableFieldNames)); - mapper.setVisibility(PropertyAccessor.FIELD, - JsonAutoDetect.Visibility.ANY); - mapper.addMixIn(Object.class, MixIn.class); - - mapper.setFilterProvider(filters); - WRITER = mapper.writerWithDefaultPrettyPrinter(); - } - - private String name; - private String prefix; - private long maxKeys; - private boolean truncated; - private List<KeyInfo> keyList; - - /** - * Default constructor needed for json serialization.
- */ - public ListKeys() { - this.keyList = new LinkedList<>(); - } - - /** - * Constructor for ListKeys. - * - * @param args ListArgs - * @param truncated is truncated - */ - public ListKeys(ListArgs args, boolean truncated) { - Preconditions.checkState(args.getArgs() instanceof BucketArgs); - this.name = ((BucketArgs) args.getArgs()).getBucketName(); - this.prefix = args.getPrefix(); - this.maxKeys = args.getMaxKeys(); - this.truncated = truncated; - } - - /** - * Converts a Json string to POJO. - * @param jsonString - json string. - * @return ListObject - * @throws IOException - Json conversion error. - */ - public static ListKeys parse(String jsonString) throws IOException { - return READER.readValue(jsonString); - } - - /** - * Returns a list of Objects. - * - * @return List of KeyInfo Objects. - */ - public List getKeyList() { - return keyList; - } - - /** - * Sets the list of Objects. - * - * @param objectList - List of Keys - */ - public void setKeyList(List objectList) { - this.keyList = objectList; - } - - /** - * Gets the Max Key Count. - * - * @return long - */ - public long getMaxKeys() { - return maxKeys; - } - - /** - * Gets bucket Name. - * - * @return String - */ - public String getName() { - return name; - } - - /** - * Gets Prefix. - * - * @return String - */ - public String getPrefix() { - return prefix; - } - - /** - * Gets truncated Status. - * - * @return Boolean - */ - public boolean isTruncated() { - return truncated; - } - - /** - * Sets the value of truncated. - * - * @param value - Boolean - */ - public void setTruncated(boolean value) { - this.truncated = value; - } - - /** - * Returns a JSON string of this object. After stripping out bytesUsed and - * keyCount. - * - * @return String - * @throws IOException - On json Errors. - */ - public String toJsonString() throws IOException { - return WRITER.writeValueAsString(this); - } - - /** - * Returns the Object as a Json String. - * - * @return String - * @throws IOException - on json errors. - */ - public String toDBString() throws IOException { - return JsonUtils.toJsonString(this); - } - - /** - * Sorts the keys based on name and version. This is useful when we return the - * list of keys. - */ - public void sort() { - Collections.sort(keyList); - } - - /** - * Add a new key to the list of keys. - * @param keyInfo - key Info - */ - public void addKey(KeyInfo keyInfo){ - this.keyList.add(keyInfo); - } - - /** - * This class allows us to create custom filters for the Json serialization. - */ - @JsonFilter(OBJECT_LIST) - class MixIn { - - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java deleted file mode 100644 index b918349d493..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.response; - -import java.io.IOException; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonFilter; -import com.fasterxml.jackson.annotation.PropertyAccessor; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.fasterxml.jackson.databind.ser.FilterProvider; -import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; -import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; - -/** - * List Volume Class is the class that is returned in JSON format to - * users when they call ListVolumes. - */ -@InterfaceAudience.Private -public class ListVolumes { - private List volumes; - - static final String VOLUME_LIST = "VOLUME_LIST_FILTER"; - private static final ObjectReader READER = - new ObjectMapper().readerFor(ListVolumes.class); - private static final ObjectWriter WRITER; - - static { - ObjectMapper mapper = new ObjectMapper(); - String[] ignorableFieldNames = {"bytesUsed", "bucketCount"}; - - FilterProvider filters = new SimpleFilterProvider() - .addFilter(VOLUME_LIST, SimpleBeanPropertyFilter - .serializeAllExcept(ignorableFieldNames)); - mapper.setVisibility(PropertyAccessor.FIELD, - JsonAutoDetect.Visibility.ANY); - mapper.addMixIn(Object.class, MixIn.class); - - mapper.setFilterProvider(filters); - WRITER = mapper.writerWithDefaultPrettyPrinter(); - } - - /** - * Used for json filtering. - */ - @JsonFilter(VOLUME_LIST) - class MixIn { - } - - /** - * Constructs ListVolume objects. - */ - public ListVolumes() { - this.volumes = new LinkedList(); - } - - /** - * Gets the list of volumes. - * - * @return List of VolumeInfo Objects - */ - public List getVolumes() { - return volumes; - } - - - /** - * Sets volume info. - * - * @param volumes - List of Volumes - */ - public void setVolumes(List volumes) { - this.volumes = volumes; - } - - /** - * Returns a JSON string of this object. - * After stripping out bytesUsed and bucketCount - * - * @return String - */ - public String toJsonString() throws IOException { - return WRITER.writeValueAsString(this); - } - - /** - * When we serialize a volumeInfo to our database - * we will use all fields. However the toJsonString - * will strip out bytesUsed and bucketCount from the - * volume Info - * - * @return Json String - * - * @throws IOException - */ - public String toDBString() throws IOException { - return JsonUtils.toJsonString(this); - } - - /** - * Parses a String to return ListVolumes object. - * - * @param data - Json String - * - * @return - ListVolumes - * - * @throws IOException - */ - public static ListVolumes parse(String data) throws IOException { - return READER.readValue(data); - } - - /** - * Adds a new volume info to the List. 
- * - * @param info - VolumeInfo - */ - public void addVolume(VolumeInfo info) { - this.volumes.add(info); - } - - /** - * Sorts the volume names based on volume name. - * This is useful when we return the list of volume names - */ - public void sort() { - Collections.sort(volumes); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java deleted file mode 100644 index 112b27e5415..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java +++ /dev/null @@ -1,308 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.response; - - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.web.request.OzoneQuota; -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonFilter; -import com.fasterxml.jackson.annotation.PropertyAccessor; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.fasterxml.jackson.databind.ser.FilterProvider; -import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; -import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; - -/** - * VolumeInfo Class is the Java class that represents - * Json when VolumeInfo Call is made. - */ -@InterfaceAudience.Private -public class VolumeInfo implements Comparable { - - static final String VOLUME_INFO = "VOLUME_INFO_FILTER"; - private static final ObjectReader READER = - new ObjectMapper().readerFor(VolumeInfo.class); - private static final ObjectWriter WRITER; - - static { - ObjectMapper mapper = new ObjectMapper(); - String[] ignorableFieldNames = {"bytesUsed", "bucketCount"}; - - FilterProvider filters = new SimpleFilterProvider() - .addFilter(VOLUME_INFO, SimpleBeanPropertyFilter - .serializeAllExcept(ignorableFieldNames)); - mapper.setVisibility(PropertyAccessor.FIELD, - JsonAutoDetect.Visibility.ANY); - mapper.addMixIn(Object.class, MixIn.class); - - mapper.setFilterProvider(filters); - WRITER = mapper.writerWithDefaultPrettyPrinter(); - } - - /** - * Custom Json Filter Class. - */ - @JsonFilter(VOLUME_INFO) - class MixIn { - } - private VolumeOwner owner; - private OzoneQuota quota; - private String volumeName; - private String createdOn; - private String createdBy; - - private long bytesUsed; - private long bucketCount; - - - /** - * Constructor for VolumeInfo. 
- * - * @param volumeName - Name of the Volume - * @param createdOn _ Date String - * @param createdBy - Person who created it - */ - public VolumeInfo(String volumeName, String createdOn, String createdBy) { - this.createdOn = createdOn; - this.volumeName = volumeName; - this.createdBy = createdBy; - } - - /** - * Constructor for VolumeInfo. - */ - public VolumeInfo() { - } - - /** - * Returns the name of the person who created this volume. - * - * @return Name of Admin who created this - */ - public String getCreatedBy() { - return createdBy; - } - - /** - * Sets the user name of the person who created this volume. - * - * @param createdBy - UserName - */ - public void setCreatedBy(String createdBy) { - this.createdBy = createdBy; - } - - /** - * Gets the date on which this volume was created. - * - * @return - Date String - */ - public String getCreatedOn() { - return createdOn; - } - - /** - * Sets the date string. - * - * @param createdOn - Date String - */ - public void setCreatedOn(String createdOn) { - this.createdOn = createdOn; - } - - /** - * Returns the owner info. - * - * @return - OwnerInfo - */ - public VolumeOwner getOwner() { - return owner; - } - - /** - * Sets the owner. - * - * @param owner - OwnerInfo - */ - public void setOwner(VolumeOwner owner) { - this.owner = owner; - } - - /** - * Returns the quota information on a volume. - * - * @return Quota - */ - public OzoneQuota getQuota() { - return quota; - } - - /** - * Sets the quota info. - * - * @param quota - Quota Info - */ - public void setQuota(OzoneQuota quota) { - this.quota = quota; - } - - /** - * gets the volume name. - * - * @return - Volume Name - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Sets the volume name. - * - * @param volumeName - Volume Name - */ - public void setVolumeName(String volumeName) { - this.volumeName = volumeName; - } - - /** - * Returns a JSON string of this object. - * After stripping out bytesUsed and bucketCount - * - * @return String - json string - * @throws IOException - */ - public String toJsonString() throws IOException { - return WRITER.writeValueAsString(this); - } - - /** - * When we serialize a volumeInfo to our database - * we will use all fields. However the toJsonString - * will strip out bytesUsed and bucketCount from the - * volume Info - * - * @return Json String - * - * @throws IOException - */ - public String toDBString() throws IOException { - return JsonUtils.toJsonString(this); - } - - - /** - * Comparable Interface. - * @param o VolumeInfo Object. - * @return Result of comparison - */ - @Override - public int compareTo(VolumeInfo o) { - return this.volumeName.compareTo(o.getVolumeName()); - } - - /** - * Gets the number of bytesUsed by this volume. - * - * @return long - Bytes used - */ - public long getBytesUsed() { - return bytesUsed; - } - - /** - * Sets number of bytesUsed by this volume. - * - * @param bytesUsed - Number of bytesUsed - */ - public void setBytesUsed(long bytesUsed) { - this.bytesUsed = bytesUsed; - } - - /** - * Returns VolumeInfo class from json string. - * - * @param data - Json String - * - * @return VolumeInfo - * - * @throws IOException - */ - public static VolumeInfo parse(String data) throws IOException { - return READER.readValue(data); - } - - /** - * Indicates whether some other object is "equal to" this one. - * - * @param obj the reference object with which to compare. - * - * @return {@code true} if this object is the same as the obj - * argument; {@code false} otherwise. 
- */ - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - VolumeInfo otherInfo = (VolumeInfo) obj; - return otherInfo.getVolumeName().equals(this.getVolumeName()); - } - - /** - * Returns a hash code value for the object. This method is - * supported for the benefit of hash tables such as those provided by - * HashMap. - * @return a hash code value for this object. - * - * @see Object#equals(Object) - * @see System#identityHashCode - */ - @Override - public int hashCode() { - return getVolumeName().hashCode(); - } - - /** - * Total number of buckets under this volume. - * - * @return - bucketCount - */ - public long getBucketCount() { - return bucketCount; - } - - /** - * Sets the buckets count. - * - * @param bucketCount - Bucket Count - */ - public void setBucketCount(long bucketCount) { - this.bucketCount = bucketCount; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeOwner.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeOwner.java deleted file mode 100644 index afb0460538c..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeOwner.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.response; - -import com.fasterxml.jackson.annotation.JsonInclude; -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * Volume Owner represents the owner of a volume. - * - * This is a class instead of a string since we might need to extend this class - * to support other forms of authentication. - */ -@InterfaceAudience.Private -public class VolumeOwner { - @JsonInclude(JsonInclude.Include.NON_NULL) - private String name; - - /** - * Constructor for VolumeOwner. - * - * @param name - name of the User - */ - public VolumeOwner(String name) { - this.name = name; - } - - /** - * Constructs Volume Owner. - */ - public VolumeOwner() { - name = null; - } - - /** - * Returns the user name. - * - * @return Name - */ - public String getName() { - return name; - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/package-info.java deleted file mode 100644 index 3bf66c861be..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
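VolumeInfo's equals() and hashCode() above key solely on the volume name, so two snapshots of one volume that differ in bytesUsed or bucketCount collapse to a single entry in hash-based collections. A small sketch (the class name and field values are illustrative):

import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.ozone.web.response.VolumeInfo;

public final class VolumeIdentityDemo {
  public static void main(String[] args) {
    // Two snapshots of the same volume, taken at different times.
    VolumeInfo first =
        new VolumeInfo("vol1", "Thu, Apr 9, 2015 10:23:45 GMT", "gandalf");
    VolumeInfo second =
        new VolumeInfo("vol1", "Fri, Apr 10, 2015 08:00:00 GMT", "gandalf");
    second.setBytesUsed(1024L);

    Set<VolumeInfo> volumes = new HashSet<>();
    volumes.add(first);
    volumes.add(second);
    // Prints 1: name-based equality deduplicates the snapshots.
    System.out.println(volumes.size());
  }
}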
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Netty-based HTTP server implementation for Ozone. - */ -package org.apache.hadoop.ozone.web.response; \ No newline at end of file diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java deleted file mode 100644 index e364e7db81a..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web; - -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.web.response.BucketInfo; -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -/** - * Test Ozone Bucket Info operation. 
- */ -public class TestBucketInfo { - @Test - public void testBucketInfoJson() throws IOException { - BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName"); - String bucketInfoString = bucketInfo.toJsonString(); - BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString); - assertEquals(bucketInfo, newBucketInfo); - } - - @Test - public void testBucketInfoDBString() throws IOException { - BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName"); - String bucketInfoString = bucketInfo.toDBString(); - BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString); - assertEquals(bucketInfo, newBucketInfo); - } - - @Test - public void testBucketInfoAddAcls() throws IOException { - BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName"); - String bucketInfoString = bucketInfo.toDBString(); - BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString); - assertEquals(bucketInfo, newBucketInfo); - List aclList = new ArrayList<>(); - - aclList.add(OzoneAcl.parseAcl("user:bilbo:r")); - aclList.add(OzoneAcl.parseAcl("user:samwise:rw")); - newBucketInfo.setAcls(aclList); - - assertNotNull(newBucketInfo.getAcls()); - assertEquals(2, newBucketInfo.getAcls().size()); - } - - @Test - public void testBucketInfoVersionAndType() throws IOException { - BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName"); - bucketInfo.setVersioning(OzoneConsts.Versioning.ENABLED); - bucketInfo.setStorageType(StorageType.DISK); - - String bucketInfoString = bucketInfo.toDBString(); - - BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString); - assertEquals(bucketInfo, newBucketInfo); - } - -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java deleted file mode 100644 index ba4a5acf0c6..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java +++ /dev/null @@ -1,115 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.web; - -import org.apache.hadoop.ozone.web.request.OzoneQuota; -import org.junit.Test; - -import java.util.HashMap; -import java.util.Set; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * Test Ozone Volume Quota. - */ -public class TestQuota { - @Test - public void testParseQuota() { - HashMap testMatrix; - testMatrix = new HashMap(); - - testMatrix.put("10TB", Boolean.TRUE); - testMatrix.put("1 TB", Boolean.TRUE); - testMatrix.put("0MB", Boolean.TRUE); - testMatrix.put("0 TB", Boolean.TRUE); - testMatrix.put(" 1000MB ", Boolean.TRUE); - - testMatrix.put(" 1000MBMB ", Boolean.FALSE); - testMatrix.put(" 1000MB00 ", Boolean.FALSE); - testMatrix.put("1000ZMB", Boolean.FALSE); - testMatrix.put("MB1000", Boolean.FALSE); - testMatrix.put("9999", Boolean.FALSE); - testMatrix.put("1", Boolean.FALSE); - testMatrix.put("remove", Boolean.FALSE); - testMatrix.put("1UNDEFINED", Boolean.FALSE); - testMatrix.put(null, Boolean.FALSE); - testMatrix.put("", Boolean.FALSE); - testMatrix.put("-1000MB", Boolean.FALSE); - testMatrix.put("1024 bytes", Boolean.TRUE); - testMatrix.put("1bytes", Boolean.TRUE); - testMatrix.put("0bytes", Boolean.TRUE); - testMatrix.put("10000 BYTES", Boolean.TRUE); - testMatrix.put("BYTESbytes", Boolean.FALSE); - testMatrix.put("bytes", Boolean.FALSE); - - Set keys = testMatrix.keySet(); - for (String key : keys) { - if (testMatrix.get(key)) { - OzoneQuota.parseQuota(key); - } else { - try { - OzoneQuota.parseQuota(key); - // should never get here since the isValid call will throw - fail(key); - fail("An exception was expected but did not happen."); - } catch (IllegalArgumentException e) { - - } - } - } - } - - @Test - public void testVerifyQuota() { - OzoneQuota qt = OzoneQuota.parseQuota("10TB"); - assertEquals(10, qt.getSize()); - assertEquals(OzoneQuota.Units.TB, qt.getUnit()); - assertEquals(10L * (1024L * 1024L * 1024L * 1024L), qt.sizeInBytes()); - - qt = OzoneQuota.parseQuota("10MB"); - assertEquals(10, qt.getSize()); - assertEquals(OzoneQuota.Units.MB, qt.getUnit()); - assertEquals(10L * (1024L * 1024L), qt.sizeInBytes()); - - qt = OzoneQuota.parseQuota("10GB"); - assertEquals(10, qt.getSize()); - assertEquals(OzoneQuota.Units.GB, qt.getUnit()); - assertEquals(10L * (1024L * 1024L * 1024L), qt.sizeInBytes()); - - qt = OzoneQuota.parseQuota("10BYTES"); - assertEquals(10, qt.getSize()); - assertEquals(OzoneQuota.Units.BYTES, qt.getUnit()); - assertEquals(10L, qt.sizeInBytes()); - - OzoneQuota emptyQuota = new OzoneQuota(); - assertEquals(-1L, emptyQuota.sizeInBytes()); - assertEquals(0, emptyQuota.getSize()); - assertEquals(OzoneQuota.Units.UNDEFINED, emptyQuota.getUnit()); - } - - @Test - public void testVerifyRemove() { - assertTrue(OzoneQuota.isRemove("remove")); - assertFalse(OzoneQuota.isRemove("not remove")); - assertFalse(OzoneQuota.isRemove(null)); - } -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestUtils.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestUtils.java deleted file mode 100644 index 
d3f8f5e659c..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestUtils.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web; - -import org.junit.Test; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.Set; - -import static org.apache.hadoop.ozone.web.utils.OzoneUtils.getRequestID; -import static org.apache.hadoop.ozone.web.utils.OzoneUtils.verifyResourceName; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * Test Ozone Utility operations like verifying resource name. - */ -public class TestUtils { - - /** - * Tests if the bucket name handling is correct. - */ - @Test - public void testValidBucketNames() { - HashMap testMatrix; - // Init the Table with Strings and Expected Return values - testMatrix = new HashMap(); - - testMatrix.put("bucket-.ozone.self", Boolean.FALSE); - testMatrix.put("bucket.-ozone.self", Boolean.FALSE); - testMatrix.put(".bucket.ozone.self", Boolean.FALSE); - testMatrix.put("bucket.ozone.self.", Boolean.FALSE); - testMatrix.put("bucket..ozone.self", Boolean.FALSE); - testMatrix.put("192.1.1.1", Boolean.FALSE); - testMatrix.put("ab", Boolean.FALSE); - testMatrix.put("bucket.ozone.self.this.is.a.really.long.name.that." - + "is.more.than.sixty.three.characters.long.for.sure", Boolean.FALSE); - testMatrix.put(null, Boolean.FALSE); - testMatrix.put("bucket@$", Boolean.FALSE); - testMatrix.put("BUCKET", Boolean.FALSE); - testMatrix.put("bucket .ozone.self", Boolean.FALSE); - testMatrix.put(" bucket.ozone.self", Boolean.FALSE); - testMatrix.put("bucket.ozone.self-", Boolean.FALSE); - testMatrix.put("-bucket.ozone.self", Boolean.FALSE); - - testMatrix.put("bucket", Boolean.TRUE); - testMatrix.put("bucket.ozone.self", Boolean.TRUE); - testMatrix.put("bucket.ozone.self", Boolean.TRUE); - testMatrix.put("bucket-name.ozone.self", Boolean.TRUE); - testMatrix.put("bucket.1.ozone.self", Boolean.TRUE); - - Set keys = testMatrix.keySet(); - for (String key : keys) { - if (testMatrix.get(key)) { - - // For valid names there should be no exceptions at all - verifyResourceName(key); - } else { - try { - verifyResourceName(key); - // should never get here since the isValid call will throw - fail("An exception was expected but did not happen."); - } catch (IllegalArgumentException e) { - - } - } - } - } - - /** - * Just calls Request ID many times and assert we - * got different values, ideally this should be - * run under parallel threads. Since the function under - * test has no external dependencies it is assumed - * that this test is good enough. 
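The matrix above pins down the resource-naming rules without showing the validator itself. The sketch below re-implements the rules the test cases imply; it is an approximation only, since the real checks live in OzoneUtils.verifyResourceName and may differ in detail (the class and pattern names here are made up):

import java.util.regex.Pattern;

public final class ResourceNameSketch {

  // Each dot-separated label: lowercase alphanumeric, optional hyphens
  // inside, never leading or trailing.
  private static final Pattern LABEL =
      Pattern.compile("^[a-z0-9]([a-z0-9-]*[a-z0-9])?$");
  private static final Pattern IPV4 =
      Pattern.compile("^\\d{1,3}(\\.\\d{1,3}){3}$");

  // Throws IllegalArgumentException on invalid names, mirroring how the
  // deleted test consumed the real verifyResourceName.
  static void verifyResourceName(String name) {
    if (name == null || name.length() < 3 || name.length() > 63) {
      throw new IllegalArgumentException("name must be 3-63 characters");
    }
    if (IPV4.matcher(name).matches()) {
      throw new IllegalArgumentException("name must not look like an IPv4 address");
    }
    for (String label : name.split("\\.", -1)) { // -1 keeps empty labels
      if (!LABEL.matcher(label).matches()) {
        throw new IllegalArgumentException("invalid label in name: " + name);
      }
    }
  }
}

Against the matrix above this accepts "bucket.1.ozone.self" and rejects "192.1.1.1", "BUCKET", "bucket..ozone.self" and the leading/trailing-separator cases.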
- */ - @Test - public void testRequestIDisRandom() { - HashSet set = new HashSet<>(); - for (int i = 0; i < 1000; i++) { - assertTrue(set.add(getRequestID())); - } - } -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestVolumeStructs.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestVolumeStructs.java deleted file mode 100644 index b433be6fb51..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestVolumeStructs.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -package org.apache.hadoop.ozone.web; - -import org.apache.hadoop.ozone.web.response.ListVolumes; -import org.apache.hadoop.ozone.web.response.VolumeInfo; -import org.apache.hadoop.ozone.web.response.VolumeOwner; -import org.junit.Test; - -import java.io.IOException; - -import static org.junit.Assert.assertEquals; - -/** - * Test Ozone Volume info structure. - */ -public class TestVolumeStructs { - - @Test - public void testVolumeInfoParse() throws IOException { - VolumeInfo volInfo = - new VolumeInfo("testvol", "Thu, Apr 9, 2015 10:23:45 GMT", "gandalf"); - VolumeOwner owner = new VolumeOwner("bilbo"); - volInfo.setOwner(owner); - String jString = volInfo.toJsonString(); - VolumeInfo newVollInfo = VolumeInfo.parse(jString); - String one = volInfo.toJsonString(); - String two = newVollInfo.toJsonString(); - - assertEquals(volInfo.toJsonString(), newVollInfo.toJsonString()); - } - - @Test - public void testVolumeInfoValue() throws IOException { - String createdOn = "Thu, Apr 9, 2015 10:23:45 GMT"; - String createdBy = "gandalf"; - VolumeInfo volInfo = new VolumeInfo("testvol", createdOn, createdBy); - assertEquals(volInfo.getCreatedBy(), createdBy); - assertEquals(volInfo.getCreatedOn(), createdOn); - } - - - @Test - public void testVolumeListParse() throws IOException { - ListVolumes list = new ListVolumes(); - for (int x = 0; x < 100; x++) { - VolumeInfo volInfo = new VolumeInfo("testvol" + Integer.toString(x), - "Thu, Apr 9, 2015 10:23:45 GMT", "gandalf"); - list.addVolume(volInfo); - } - list.sort(); - String listString = list.toJsonString(); - ListVolumes newList = ListVolumes.parse(listString); - assertEquals(list.toJsonString(), newList.toJsonString()); - } -} diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml index b1a48e3d10c..0f5c3c49ef7 100644 --- a/hadoop-ozone/datanode/pom.xml +++ b/hadoop-ozone/datanode/pom.xml @@ -47,10 +47,6 @@ org.apache.hadoop hadoop-hdds-container-service - - org.apache.hadoop - hadoop-ozone-objectstore-service - com.sun.xml.bind jaxb-impl diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching index 5def094fead..af896025c7b 100755 
--- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching +++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching @@ -112,10 +112,6 @@ run cp "${ROOT}/hadoop-ozone/common/src/main/bin/stop-ozone.sh" "sbin/" # fault injection tests run cp -r "${ROOT}/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade" tests -#shaded datanode service -run mkdir -p "./share/hadoop/ozoneplugin" -run cp "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}-plugin.jar" "./share/hadoop/ozoneplugin/hadoop-ozone-datanode-plugin-${HDDS_VERSION}.jar" - # Optional documentation, could be missing cp -r "${ROOT}/hadoop-hdds/docs/target/classes/docs" ./ diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot index 96065673944..044ae7187b9 100644 --- a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot +++ b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot @@ -19,27 +19,34 @@ Library OperatingSystem Resource ../commonlib.robot Test Setup Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab Test Timeout 2 minute +Suite Setup Generate prefix *** Variables *** +${prefix} generated + +*** Keywords *** +Generate prefix + ${random} = Generate Random String 5 [NUMBERS] + Set Suite Variable ${prefix} ${random} *** Test Cases *** RpcClient with port - Test ozone shell o3:// om:9862 rpcwoport + Test ozone shell o3:// om:9862 ${prefix}-rpcwoport RpcClient volume acls - Test Volume Acls o3:// om:9862 rpcwoport2 + Test Volume Acls o3:// om:9862 ${prefix}-rpcwoport2 RpcClient bucket acls - Test Bucket Acls o3:// om:9862 rpcwoport2 + Test Bucket Acls o3:// om:9862 ${prefix}-rpcwoport2 RpcClient key acls - Test Key Acls o3:// om:9862 rpcwoport2 + Test Key Acls o3:// om:9862 ${prefix}-rpcwoport2 RpcClient without host - Test ozone shell o3:// ${EMPTY} rpcwport + Test ozone shell o3:// ${EMPTY} ${prefix}-rpcwport RpcClient without scheme - Test ozone shell ${EMPTY} ${EMPTY} rpcwoscheme + Test ozone shell ${EMPTY} ${EMPTY} ${prefix}-rpcwoscheme *** Keywords *** @@ -48,20 +55,20 @@ Test ozone shell ${result} = Execute ozone sh volume create ${protocol}${server}/${volume} --quota 100TB Should not contain ${result} Failed Should contain ${result} Creating Volume: ${volume} - ${result} = Execute ozone sh volume list ${protocol}${server}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")' - Should contain ${result} createdOn - ${result} = Execute ozone sh volume list | grep -Ev 'Removed|DEBUG|ERROR|INFO|TRACE|WARN' | jq -r '.[] | select(.volumeName=="${volume}")' - Should contain ${result} createdOn + ${result} = Execute ozone sh volume list ${protocol}${server}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="${volume}")' + Should contain ${result} creationTime + ${result} = Execute ozone sh volume list | grep -Ev 'Removed|DEBUG|ERROR|INFO|TRACE|WARN' | jq -r '. | select(.name=="${volume}")' + Should contain ${result} creationTime # TODO: Disable updating the owner, acls should be used to give access to other user. Execute ozone sh volume update ${protocol}${server}/${volume} --quota 10TB # ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.volumeName=="${volume}") | .owner | .name' # Should Be Equal ${result} bill - ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .quota | .size' - Should Be Equal ${result} 10 + ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="${volume}") | .quota' + Should Be Equal ${result} 10995116277760 Execute ozone sh bucket create ${protocol}${server}/${volume}/bb1 - ${result} = Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' + ${result} = Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="bb1") | .storageType' Should Be Equal ${result} DISK - ${result} = Execute ozone sh bucket list ${protocol}${server}/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' + ${result} = Execute ozone sh bucket list ${protocol}${server}/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="bb1") | .volumeName' Should Be Equal ${result} ${volume} Run Keyword Test key handling ${protocol} ${server} ${volume} Execute ozone sh bucket delete ${protocol}${server}/${volume}/bb1 @@ -106,12 +113,12 @@ Test key handling Execute rm -f NOTICE.txt.1 Execute ozone sh key get ${protocol}${server}/${volume}/bb1/key1 NOTICE.txt.1 Execute ls -l NOTICE.txt.1 - ${result} = Execute ozone sh key info ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")' - Should contain ${result} createdOn - ${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName' + ${result} = Execute ozone sh key info ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="key1")' + Should contain ${result} creationTime + ${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.name=="key1") | .name' Should Be Equal ${result} key1 Execute ozone sh key rename ${protocol}${server}/${volume}/bb1 key1 key2 - ${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[].keyName' + ${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.name' Should Be Equal ${result} key2 Execute ozone sh key delete ${protocol}${server}/${volume}/bb1/key2 diff --git a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot b/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot index 59df7b8482d..f728691b5f7 100644 --- a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot +++ b/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot @@ -34,14 +34,14 @@ Check volume from ozonefs Run ozoneFS tests Execute ozone fs -mkdir -p o3fs://bucket1.fstest/testdir/deep - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName' + ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' Should contain ${result} testdir/deep Execute ozone fs -copyFromLocal NOTICE.txt o3fs://bucket1.fstest/testdir/deep/ - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName' + ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' Should contain ${result} NOTICE.txt Execute ozone fs -put NOTICE.txt o3fs://bucket1.fstest/testdir/deep/PUTFILE.txt - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName' + ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' Should contain ${result} PUTFILE.txt ${result} = Execute ozone fs -ls o3fs://bucket1.fstest/testdir/deep/ @@ -49,13 +49,13 @@ Run ozoneFS tests Should contain ${result} PUTFILE.txt Execute ozone fs -mv o3fs://bucket1.fstest/testdir/deep/NOTICE.txt o3fs://bucket1.fstest/testdir/deep/MOVED.TXT - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName' + ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' Should contain ${result} MOVED.TXT Should not contain ${result} NOTICE.txt Execute ozone fs -mkdir -p o3fs://bucket1.fstest/testdir/deep/subdir1 Execute ozone fs -cp o3fs://bucket1.fstest/testdir/deep/MOVED.TXT o3fs://bucket1.fstest/testdir/deep/subdir1/NOTICE.txt - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName' + ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' Should contain ${result} subdir1/NOTICE.txt ${result} = Execute ozone fs -ls o3fs://bucket1.fstest/testdir/deep/subdir1/ @@ -65,19 +65,19 @@ Run ozoneFS tests Should not contain ${result} Failed Execute ozone fs -rm o3fs://bucket1.fstest/testdir/deep/subdir1/NOTICE.txt - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName' + ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' Should not contain ${result} NOTICE.txt ${result} = Execute ozone fs -rmdir o3fs://bucket1.fstest/testdir/deep/subdir1/ - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName' + ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' Should not contain ${result} subdir1 Execute ozone fs -touch 
o3fs://bucket1.fstest/testdir/TOUCHFILE.txt - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName' + ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' Should contain ${result} TOUCHFILE.txt Execute ozone fs -rm -r o3fs://bucket1.fstest/testdir/ - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName' + ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' Should not contain ${result} testdir Execute rm -Rf localdir1 diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index fee0c615279..5593f28e9d4 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -48,10 +48,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test - - org.apache.hadoop - hadoop-ozone-objectstore-service - org.apache.hadoop hadoop-ozone-s3gateway diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index 44c005c29ec..9dd3f005c8f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -137,15 +137,6 @@ static Builder newHABuilder(OzoneConfiguration conf) { */ OzoneClient getRpcClient() throws IOException; - /** - * Returns an REST based {@link OzoneClient} to access the - * {@link MiniOzoneCluster}. - * - * @return {@link OzoneClient} - * @throws IOException - */ - OzoneClient getRestClient() throws IOException; - /** * Returns StorageContainerLocationClient to communicate with * {@link StorageContainerManager} associated with the MiniOzoneCluster. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index be1b77b2303..dc0ed9f7177 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -36,7 +36,6 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.common.Storage.StorageState; import org.apache.hadoop.ozone.container.common.utils.ContainerCache; import org.apache.hadoop.ozone.om.OMConfigKeys; @@ -67,7 +66,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState .HEALTHY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys .DFS_CONTAINER_IPC_PORT; import static org.apache.hadoop.ozone.OzoneConfigKeys @@ -212,18 +210,6 @@ public OzoneClient getRpcClient() throws IOException { return OzoneClientFactory.getRpcClient(conf); } - /** - * Creates an {@link OzoneClient} connected to this cluster's REST - * service. Callers take ownership of the client and must close it when done. 
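With getRestClient() removed, getRpcClient() is the only client accessor the mini cluster keeps. A minimal sketch of the surviving path, assuming default configuration (the volume name is illustrative):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

public final class RpcClientSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // OzoneClient is Closeable, so callers still "take ownership" as the
    // deleted javadoc put it; try-with-resources handles the close.
    try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
      ObjectStore store = client.getObjectStore();
      store.createVolume("vol1"); // all access is now RPC-backed
    }
  }
}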
- * - * @return OzoneRestClient connected to this cluster's REST service - * @throws OzoneException if Ozone encounters an error creating the client - */ - @Override - public OzoneClient getRestClient() throws IOException { - return OzoneClientFactory.getRestClient(conf); - } - /** * Returns an RPC proxy connected to this cluster's StorageContainerManager * for accessing container location information. Callers take ownership of @@ -619,8 +605,6 @@ private void configureOM() { private void configureHddsDatanodes() { conf.set(ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY, "0.0.0.0:0"); conf.set(HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); - conf.set(HDDS_DATANODE_PLUGINS_KEY, - "org.apache.hadoop.ozone.web.OzoneHddsDatanodeService"); conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, randomContainerPort); conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java index a1243e87f7b..9b0f2f74194 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java @@ -18,15 +18,23 @@ package org.apache.hadoop.ozone; +import java.io.Closeable; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.client.rpc.RpcClient; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.hdds.ratis.RatisHelper; + +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.ratis.RatisHelper.newRaftClient; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import org.apache.ratis.client.RaftClient; import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.rpc.RpcType; @@ -35,16 +43,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.Closeable; -import java.io.IOException; -import java.net.URISyntaxException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import static org.apache.hadoop.hdds.ratis.RatisHelper.newRaftClient; - /** * Helpers for Ratis tests. 
*/ @@ -80,7 +78,7 @@ public MiniOzoneCluster getCluster() { } public ClientProtocol newOzoneClient() - throws OzoneException, URISyntaxException, IOException { + throws IOException { return new RpcClient(conf); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java new file mode 100644 index 00000000000..e27aa857258 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.HashMap; +import java.util.Scanner; + +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.protocol.StorageType; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.VolumeArgs; + +import org.apache.commons.lang3.RandomStringUtils; + +/** + * Utility to help to generate test data. 
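newOzoneClient() above now declares only IOException and hands back the protocol-level client by direct construction. An illustrative sketch of the same pattern outside the helper (the volume name is made up):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.client.rpc.RpcClient;

public final class ProtocolClientSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Only IOException can escape now; the rest-specific OzoneException
    // and URISyntaxException are gone from the signature.
    ClientProtocol client = new RpcClient(conf);
    try {
      client.createVolume("vol1");
    } finally {
      client.close();
    }
  }
}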
+ */ +public final class TestDataUtil { + + private TestDataUtil() { + } + + public static OzoneBucket createVolumeAndBucket(MiniOzoneCluster cluster, + String volumeName, String bucketName) throws IOException { + String userName = "user" + RandomStringUtils.randomNumeric(5); + String adminName = "admin" + RandomStringUtils.randomNumeric(5); + + OzoneClient client = cluster.getClient(); + + VolumeArgs volumeArgs = VolumeArgs.newBuilder() + .setAdmin(adminName) + .setOwner(userName) + .build(); + + ObjectStore objectStore = client.getObjectStore(); + + objectStore.createVolume(volumeName, volumeArgs); + + OzoneVolume volume = objectStore.getVolume(volumeName); + + BucketArgs omBucketArgs = BucketArgs.newBuilder() + .setStorageType(StorageType.DISK) + .build(); + + volume.createBucket(bucketName, omBucketArgs); + return volume.getBucket(bucketName); + + } + + public static void createKey(OzoneBucket bucket, String keyName, + String content) throws IOException { + try (OutputStream stream = bucket + .createKey(keyName, content.length(), ReplicationType.STAND_ALONE, + ReplicationFactor.ONE, new HashMap<>())) { + stream.write(content.getBytes()); + } + } + + public static String getKey(OzoneBucket bucket, String keyName) + throws IOException { + try (InputStream stream = bucket.readKey(keyName)) { + return new Scanner(stream).useDelimiter("\\A").next(); + } + } + + public static OzoneBucket createVolumeAndBucket(MiniOzoneCluster cluster) + throws IOException { + String volumeName = "volume" + RandomStringUtils.randomNumeric(5); + String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); + return createVolumeAndBucket(cluster, volumeName, bucketName); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneHelper.java deleted file mode 100644 index 8a3d1a85755..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneHelper.java +++ /dev/null @@ -1,413 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
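The new TestDataUtil above replaces per-test volume/bucket plumbing in the integration tests. Typical use from a test might look like the following sketch (the roundTrip method and key contents are illustrative; the cluster must already be started by the enclosing test):

import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;

public final class TestDataUtilUsage {
  // Creates a random volume/bucket pair, writes one key, reads it back.
  static void roundTrip(MiniOzoneCluster cluster) throws Exception {
    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
    TestDataUtil.createKey(bucket, "key1", "sample content");
    String back = TestDataUtil.getKey(bucket, "key1");
    if (!"sample content".equals(back)) {
      throw new AssertionError("round trip failed: " + back);
    }
  }
}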
- */ -package org.apache.hadoop.ozone; - -import org.apache.hadoop.ozone.web.exceptions.ErrorTable; -import org.apache.hadoop.ozone.client.rest.headers.Header; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.util.Time; -import org.apache.http.HttpResponse; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClients; - -import javax.ws.rs.core.HttpHeaders; -import java.io.IOException; -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.Locale; - -import static java.net.HttpURLConnection.HTTP_CREATED; -import static java.net.HttpURLConnection.HTTP_OK; -import static org.junit.Assert.assertEquals; - -/** - * Helper functions to test Ozone. - */ -public class TestOzoneHelper { - - public CloseableHttpClient createHttpClient() { - return HttpClients.createDefault(); - } - /** - * Creates Volumes on Ozone Store. - * - * @throws IOException - */ - public void testCreateVolumes(int port) throws IOException { - SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US); - CloseableHttpClient client = createHttpClient(); - String volumeName = OzoneUtils.getRequestID().toLowerCase(); - try { - HttpPost httppost = new HttpPost( - String.format("http://localhost:%d/%s", port, volumeName)); - - httppost.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - httppost.addHeader(HttpHeaders.DATE, - format.format(new Date(Time.monotonicNow()))); - httppost.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " + - OzoneConsts.OZONE_SIMPLE_HDFS_USER); - httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER); - - HttpResponse response = client.execute(httppost); - assertEquals(response.toString(), HTTP_CREATED, - response.getStatusLine().getStatusCode()); - } finally { - client.close(); - } - } - - /** - * Create Volumes with Quota. - * - * @throws IOException - */ - public void testCreateVolumesWithQuota(int port) throws IOException { - SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US); - CloseableHttpClient client = createHttpClient(); - String volumeName = OzoneUtils.getRequestID().toLowerCase(); - try { - HttpPost httppost = new HttpPost( - String.format("http://localhost:%d/%s?quota=10TB", port, volumeName)); - - httppost.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - httppost.addHeader(HttpHeaders.DATE, - format.format(new Date(Time.monotonicNow()))); - httppost.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " + - OzoneConsts.OZONE_SIMPLE_HDFS_USER); - httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER); - - HttpResponse response = client.execute(httppost); - assertEquals(response.toString(), HTTP_CREATED, - response.getStatusLine().getStatusCode()); - } finally { - client.close(); - } - } - - /** - * Create Volumes with Invalid Quota. 
- * - * @throws IOException - */ - public void testCreateVolumesWithInvalidQuota(int port) throws IOException { - SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US); - CloseableHttpClient client = createHttpClient(); - String volumeName = OzoneUtils.getRequestID().toLowerCase(); - try { - HttpPost httppost = new HttpPost( - String.format("http://localhost:%d/%s?quota=NaN", port, volumeName)); - - httppost.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - httppost.addHeader(HttpHeaders.DATE, - format.format(new Date(Time.monotonicNow()))); - httppost.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " + - OzoneConsts.OZONE_SIMPLE_HDFS_USER); - httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER); - - HttpResponse response = client.execute(httppost); - assertEquals(response.toString(), ErrorTable.MALFORMED_QUOTA - .getHttpCode(), - response.getStatusLine().getStatusCode()); - } finally { - client.close(); - } - } - - /** - * To create a volume a user name must be specified using OZONE_USER header. - * This test verifies that we get an error in case we call without a OZONE - * user name. - * - * @throws IOException - */ - public void testCreateVolumesWithInvalidUser(int port) throws IOException { - SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US); - CloseableHttpClient client = createHttpClient(); - String volumeName = OzoneUtils.getRequestID().toLowerCase(); - try { - HttpPost httppost = new HttpPost( - String.format("http://localhost:%d/%s?quota=1TB", port, volumeName)); - - httppost.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - httppost.addHeader(HttpHeaders.DATE, - format.format(new Date(Time.monotonicNow()))); - httppost.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " + - OzoneConsts.OZONE_SIMPLE_HDFS_USER); - - HttpResponse response = client.execute(httppost); - - assertEquals(response.toString(), ErrorTable.USER_NOT_FOUND.getHttpCode(), - response.getStatusLine().getStatusCode()); - } finally { - client.close(); - } - } - - /** - * Only Admins can create volumes in Ozone. This test uses simple userauth as - * backend and hdfs and root are admin users in the simple backend. - *
- * This test tries to create a volume as user bilbo. - * - * @throws IOException - */ - public void testCreateVolumesWithOutAdminRights(int port) throws IOException { - SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US); - CloseableHttpClient client = createHttpClient(); - String volumeName = OzoneUtils.getRequestID().toLowerCase(); - try { - HttpPost httppost = new HttpPost( - String.format("http://localhost:%d/%s?quota=NaN", port, volumeName)); - - httppost.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - httppost.addHeader(HttpHeaders.DATE, - format.format(new Date(Time.monotonicNow()))); - httppost.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " + - "bilbo"); // This is not a root user in Simple Auth - httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER); - - HttpResponse response = client.execute(httppost); - assertEquals(response.toString(), ErrorTable.ACCESS_DENIED.getHttpCode(), - response.getStatusLine().getStatusCode()); - } finally { - client.close(); - } - } - - /** - * Create a bunch of volumes in a loop. - * - * @throws IOException - */ - public void testCreateVolumesInLoop(int port) throws IOException { - SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US); - - for (int x = 0; x < 1000; x++) { - CloseableHttpClient client = createHttpClient(); - String volumeName = OzoneUtils.getRequestID().toLowerCase(); - String userName = OzoneUtils.getRequestID().toLowerCase(); - - HttpPost httppost = new HttpPost( - String.format("http://localhost:%d/%s?quota=10TB", port, volumeName)); - - httppost.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - httppost.addHeader(HttpHeaders.DATE, - format.format(new Date(Time.monotonicNow()))); - httppost.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " + - OzoneConsts.OZONE_SIMPLE_HDFS_USER); - httppost.addHeader(Header.OZONE_USER, userName); - - HttpResponse response = client.execute(httppost); - assertEquals(response.toString(), HTTP_CREATED, - response.getStatusLine().getStatusCode()); - client.close(); - } - } - /** - * Get volumes owned by the user. - * - * @throws IOException - */ - public void testGetVolumesByUser(int port) throws IOException { - SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US); - // We need to create a volume for this test to succeed. - testCreateVolumes(port); - CloseableHttpClient client = createHttpClient(); - try { - HttpGet httpget = - new HttpGet(String.format("http://localhost:%d/", port)); - - httpget.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - - httpget.addHeader(HttpHeaders.DATE, - format.format(new Date(Time.monotonicNow()))); - - httpget.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " + - OzoneConsts.OZONE_SIMPLE_HDFS_USER); - - httpget.addHeader(Header.OZONE_USER, - OzoneConsts.OZONE_SIMPLE_HDFS_USER); - - HttpResponse response = client.execute(httpget); - assertEquals(response.toString(), HTTP_OK, - response.getStatusLine().getStatusCode()); - - } finally { - client.close(); - } - } - - /** - * Admins can read volumes belonging to other users. 
- * - * @throws IOException - */ - public void testGetVolumesOfAnotherUser(int port) throws IOException { - SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US); - - CloseableHttpClient client = createHttpClient(); - try { - HttpGet httpget = - new HttpGet(String.format("http://localhost:%d/", port)); - - httpget.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - httpget.addHeader(HttpHeaders.DATE, - format.format(new Date(Time.monotonicNow()))); - - httpget.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " + - OzoneConsts.OZONE_SIMPLE_ROOT_USER); - - // User Root is getting volumes belonging to user HDFS - httpget.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER); - - HttpResponse response = client.execute(httpget); - assertEquals(response.toString(), HTTP_OK, - response.getStatusLine().getStatusCode()); - - } finally { - client.close(); - } - } - - /** - * if you try to read volumes belonging to another user, - * then server always ignores it. - * - * @throws IOException - */ - public void testGetVolumesOfAnotherUserShouldFail(int port) - throws IOException { - SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US); - - CloseableHttpClient client = createHttpClient(); - String userName = OzoneUtils.getRequestID().toLowerCase(); - try { - HttpGet httpget = - new HttpGet(String.format("http://localhost:%d/", port)); - - httpget.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - httpget.addHeader(HttpHeaders.DATE, - format.format(new Date(Time.monotonicNow()))); - - httpget.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " + - userName); - - // userName is NOT a root user, hence he should NOT be able to read the - // volumes of user HDFS - httpget.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER); - - HttpResponse response = client.execute(httpget); - // We will get an Error called userNotFound when using Simple Auth Scheme - assertEquals(response.toString(), ErrorTable.USER_NOT_FOUND.getHttpCode(), - response.getStatusLine().getStatusCode()); - - } finally { - client.close(); - } - } - - public void testListKeyOnEmptyBucket(int port) throws IOException { - SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US); - CloseableHttpClient client = createHttpClient(); - String volumeName = OzoneUtils.getRequestID().toLowerCase(); - String bucketName = OzoneUtils.getRequestID().toLowerCase() + "bucket"; - try { - - HttpPost httppost = new HttpPost( - String.format("http://localhost:%d/%s", port, volumeName)); - httppost.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - httppost.addHeader(HttpHeaders.DATE, - format.format(new Date(Time.monotonicNow()))); - httppost.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " - + OzoneConsts.OZONE_SIMPLE_HDFS_USER); - httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER); - HttpResponse response = client.execute(httppost); - assertEquals(response.toString(), HTTP_CREATED, - response.getStatusLine().getStatusCode()); - client.close(); - - client = createHttpClient(); - httppost = new HttpPost(String - .format("http://localhost:%d/%s/%s", port, volumeName, bucketName)); - httppost.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - httppost.addHeader(HttpHeaders.DATE, - format.format(new 
Date(Time.monotonicNow()))); - httppost.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " - + OzoneConsts.OZONE_SIMPLE_HDFS_USER); - httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER); - response = client.execute(httppost); - assertEquals(response.toString(), HTTP_CREATED, - response.getStatusLine().getStatusCode()); - client.close(); - - client = createHttpClient(); - HttpGet httpget = new HttpGet(String - .format("http://localhost:%d/%s/%s", port, volumeName, bucketName)); - httpget.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - httpget.addHeader(HttpHeaders.DATE, - format.format(new Date(Time.monotonicNow()))); - httpget.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " - + OzoneConsts.OZONE_SIMPLE_HDFS_USER); - httpget.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER); - response = client.execute(httpget); - assertEquals(response.toString() + " " + response.getStatusLine() - .getReasonPhrase(), HTTP_OK, - response.getStatusLine().getStatusCode()); - - } finally { - client.close(); - } - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java index bb0a3e69207..5aceda015b2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java @@ -16,37 +16,30 @@ */ package org.apache.hadoop.ozone; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; +import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.utils.MetadataKeyFilters; +import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; + import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import com.google.common.primitives.Longs; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.web.handlers.BucketArgs; -import 
org.apache.hadoop.ozone.web.handlers.KeyArgs; -import org.apache.hadoop.ozone.web.handlers.UserArgs; -import org.apache.hadoop.ozone.web.handlers.VolumeArgs; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.utils.MetadataKeyFilters; -import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; - -import java.io.IOException; -import java.io.OutputStream; -import java.util.List; -import java.util.Map; -import java.util.Set; /** * A helper class used by {@link TestStorageContainerManager} to generate @@ -56,53 +49,32 @@ public class TestStorageContainerManagerHelper { private final MiniOzoneCluster cluster; private final Configuration conf; - private final StorageHandler storageHandler; public TestStorageContainerManagerHelper(MiniOzoneCluster cluster, Configuration conf) throws IOException { this.cluster = cluster; this.conf = conf; - storageHandler = new ObjectStoreHandler(conf).getStorageHandler(); } public Map createKeys(int numOfKeys, int keySize) throws Exception { Map keyLocationMap = Maps.newHashMap(); - String volume = "volume" + RandomStringUtils.randomNumeric(5); - String bucket = "bucket" + RandomStringUtils.randomNumeric(5); - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - UserArgs userArgs = new UserArgs(null, OzoneUtils.getRequestID(), - null, null, null, null); - VolumeArgs createVolumeArgs = new VolumeArgs(volume, userArgs); - createVolumeArgs.setUserName(userName); - createVolumeArgs.setAdminName(adminName); - storageHandler.createVolume(createVolumeArgs); - - BucketArgs bucketArgs = new BucketArgs(bucket, createVolumeArgs); - bucketArgs.setStorageType(StorageType.DISK); - storageHandler.createBucket(bucketArgs); - - // Write 20 keys in bucket. + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster); + // Write numOfKeys keys into the bucket. Set<String> keyNames = Sets.newHashSet(); - KeyArgs keyArgs; for (int i = 0; i < numOfKeys; i++) { String keyName = RandomStringUtils.randomAlphabetic(5) + i; keyNames.add(keyName); - keyArgs = new KeyArgs(keyName, bucketArgs); - keyArgs.setSize(keySize); - // Just for testing list keys call, so no need to write real data.
- OutputStream stream = storageHandler.newKeyWriter(keyArgs); - stream.write(DFSUtil.string2Bytes( - RandomStringUtils.randomAlphabetic(5))); - stream.close(); + + TestDataUtil + .createKey(bucket, keyName, RandomStringUtils.randomAlphabetic(5)); } for (String key : keyNames) { OmKeyArgs arg = new OmKeyArgs.Builder() - .setVolumeName(volume) - .setBucketName(bucket) + .setVolumeName(bucket.getVolumeName()) + .setBucketName(bucket.getName()) .setKeyName(key) .setRefreshPipeline(true) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 920cf04770e..957977f3ffe 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -58,6 +58,7 @@ import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneKey; import org.apache.hadoop.ozone.client.OzoneKeyDetails; @@ -67,7 +68,6 @@ import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.common.OzoneChecksumException; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.interfaces.Container; @@ -240,7 +240,7 @@ public void testOMClientProxyProvider() { @Test public void testSetVolumeQuota() - throws IOException, OzoneException { + throws IOException { String volumeName = UUID.randomUUID().toString(); store.createVolume(volumeName); store.getVolume(volumeName).setQuota( @@ -264,7 +264,7 @@ public void testDeleteVolume() @Test public void testCreateVolumeWithMetadata() - throws IOException, OzoneException { + throws IOException, OzoneClientException { String volumeName = UUID.randomUUID().toString(); VolumeArgs volumeArgs = VolumeArgs.newBuilder() .addMetadata("key1", "val1") @@ -278,7 +278,7 @@ public void testCreateVolumeWithMetadata() @Test public void testCreateBucketWithMetadata() - throws IOException, OzoneException { + throws IOException, OzoneClientException { long currentTime = Time.now(); String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -297,7 +297,7 @@ public void testCreateBucketWithMetadata() @Test public void testCreateBucket() - throws IOException, OzoneException { + throws IOException, OzoneClientException { long currentTime = Time.now(); String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -312,7 +312,7 @@ public void testCreateBucket() @Test public void testCreateS3Bucket() - throws IOException, OzoneException { + throws IOException, OzoneClientException { long currentTime = Time.now(); String userName = UserGroupInformation.getCurrentUser().getUserName(); String bucketName = UUID.randomUUID().toString(); @@ -345,7 +345,7 @@ public void testCreateSecureS3Bucket() throws IOException { @Test public void testListS3Buckets() - throws IOException, 
OzoneException { + throws IOException, OzoneClientException { String userName = "ozone100"; String bucketName1 = UUID.randomUUID().toString(); String bucketName2 = UUID.randomUUID().toString(); @@ -363,7 +363,7 @@ public void testListS3Buckets() @Test public void testListS3BucketsFail() - throws IOException, OzoneException { + throws IOException, OzoneClientException { String userName = "randomUser"; Iterator iterator = store.listS3Buckets(userName, null); @@ -402,7 +402,7 @@ public void testDeleteS3NonExistingBucket() { @Test public void testCreateS3BucketMapping() - throws IOException, OzoneException { + throws IOException, OzoneClientException { long currentTime = Time.now(); String userName = "ozone"; String bucketName = UUID.randomUUID().toString(); @@ -421,7 +421,7 @@ public void testCreateS3BucketMapping() @Test public void testCreateBucketWithVersioning() - throws IOException, OzoneException { + throws IOException, OzoneClientException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); store.createVolume(volumeName); @@ -436,7 +436,7 @@ public void testCreateBucketWithVersioning() @Test public void testCreateBucketWithStorageType() - throws IOException, OzoneException { + throws IOException, OzoneClientException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); store.createVolume(volumeName); @@ -451,7 +451,7 @@ public void testCreateBucketWithStorageType() @Test public void testCreateBucketWithAcls() - throws IOException, OzoneException { + throws IOException, OzoneClientException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", @@ -470,7 +470,7 @@ public void testCreateBucketWithAcls() @Test public void testCreateBucketWithAllArgument() - throws IOException, OzoneException { + throws IOException, OzoneClientException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", @@ -507,7 +507,7 @@ public void testInvalidBucketCreation() throws Exception { @Test public void testAddBucketAcl() - throws IOException, OzoneException { + throws IOException, OzoneClientException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); store.createVolume(volumeName); @@ -526,7 +526,7 @@ public void testAddBucketAcl() @Test public void testRemoveBucketAcl() - throws IOException, OzoneException { + throws IOException, OzoneClientException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", @@ -581,7 +581,7 @@ public void testRemoveBucketAclUsingRpcClientRemoveAcl() @Test public void testSetBucketVersioning() - throws IOException, OzoneException { + throws IOException, OzoneClientException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); store.createVolume(volumeName); @@ -618,7 +618,7 @@ public void testAclsAfterCallingSetBucketProperty() throws Exception { @Test public void testSetBucketStorageType() - throws IOException, OzoneException { + throws IOException, OzoneClientException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); store.createVolume(volumeName); @@ -678,7 +678,7 @@ private boolean verifyRatisReplication(String volumeName, String bucketName, @Test 
public void testPutKey() - throws IOException, OzoneException { + throws IOException, OzoneClientException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); long currentTime = Time.now(); @@ -746,7 +746,7 @@ public void testValidateBlockLengthWithCommitKey() throws IOException { @Test public void testPutKeyRatisOneNode() - throws IOException, OzoneException { + throws IOException, OzoneClientException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); long currentTime = Time.now(); @@ -781,7 +781,7 @@ public void testPutKeyRatisOneNode() @Test public void testPutKeyRatisThreeNodes() - throws IOException, OzoneException { + throws IOException, OzoneClientException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); long currentTime = Time.now(); @@ -976,7 +976,7 @@ private void readKey(OzoneBucket bucket, String keyName, String data) } @Test - public void testGetKeyDetails() throws IOException, OzoneException { + public void testGetKeyDetails() throws IOException, OzoneClientException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index e9dfb101ccd..b1adb73b878 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -48,7 +48,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.ozShell.TestOzoneShell; import org.apache.hadoop.ozone.protocol.commands.RetriableDatanodeEventWatcher; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; @@ -97,7 +96,7 @@ public static void init() throws Exception { GenericTestUtils.setLogLevel(SCMBlockDeletingService.LOG, Level.DEBUG); String path = - GenericTestUtils.getTempPath(TestOzoneShell.class.getSimpleName()); + GenericTestUtils.getTempPath(TestBlockDeletion.class.getSimpleName()); File baseDir = new File(path); baseDir.mkdirs(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java index b1fa6fb2245..74cde37bcea 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java @@ -17,8 +17,15 @@ */ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; +import java.io.IOException; +import java.util.HashMap; +import java.util.concurrent.TimeoutException; + +import org.apache.hadoop.hdds.client.ReplicationFactor; 
+import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -26,32 +33,25 @@ import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; import org.apache.hadoop.test.GenericTestUtils; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; import org.junit.Assert; import org.junit.Test; -import java.io.IOException; -import java.util.HashMap; -import java.util.concurrent.TimeoutException; - /** * Tests the behaviour of the datanode when it receives a close container command. */ public class TestCloseContainerHandler { @Test - public void test() throws IOException, TimeoutException, InterruptedException, - OzoneException { + public void test() + throws IOException, TimeoutException, InterruptedException { //setup a cluster (1G free space is enough for a unit test) OzoneConfiguration conf = new OzoneConfiguration(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java deleted file mode 100644 index 328d1be2a11..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java +++ /dev/null @@ -1,208 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.web.handlers.BucketArgs; -import org.apache.hadoop.ozone.web.handlers.KeyArgs; -import org.apache.hadoop.ozone.web.handlers.UserArgs; -import org.apache.hadoop.ozone.web.handlers.VolumeArgs; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; - -import static org.apache.hadoop.test.MetricsAsserts.assertCounter; -import static org.apache.hadoop.test.MetricsAsserts.getMetrics; -import static org.junit.Assert.assertEquals; - -/** - * Test key write/read where a key can span multiple containers. - */ -public class TestMultipleContainerReadWrite { - private static MiniOzoneCluster cluster = null; - private static StorageHandler storageHandler; - private static UserArgs userArgs; - private static OzoneConfiguration conf; - - @Rule - public ExpectedException exception = ExpectedException.none(); - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 1, - StorageUnit.MB); - conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 5); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - storageHandler = new ObjectStoreHandler(conf).getStorageHandler(); - userArgs = new UserArgs(null, OzoneUtils.getRequestID(), - null, null, null, null); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testWriteRead() throws Exception { - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - String keyName = "key" + RandomStringUtils.randomNumeric(5); - - VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs); - createVolumeArgs.setUserName(userName); - createVolumeArgs.setAdminName(adminName); - storageHandler.createVolume(createVolumeArgs); - - BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs); - bucketArgs.setStorageType(StorageType.DISK); - storageHandler.createBucket(bucketArgs); - - String dataString = RandomStringUtils.randomAscii(3 * (int)OzoneConsts.MB); - KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs); - keyArgs.setSize(3 * (int)OzoneConsts.MB); - keyArgs.setUserName(userName); - - try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) { - outputStream.write(dataString.getBytes()); - } - - byte[] data = new byte[dataString.length()]; - try (InputStream inputStream = storageHandler.newKeyReader(keyArgs)) { - inputStream.read(data, 0, data.length); - } - assertEquals(dataString, new String(data)); - // checking whether container meta data has the chunk file persisted. - MetricsRecordBuilder containerMetrics = getMetrics( - "StorageContainerMetrics"); - assertCounter("numWriteChunk", 3L, containerMetrics); - assertCounter("numReadChunk", 3L, containerMetrics); - } - - // Disable this test, because this tests assumes writing beyond a specific - // size is not allowed. Which is not true for now. Keeping this test in case - // we add this restrict in the future. 
- @Ignore - @Test - public void testErrorWrite() throws Exception { - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - String keyName = "key" + RandomStringUtils.randomNumeric(5); - - VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs); - createVolumeArgs.setUserName(userName); - createVolumeArgs.setAdminName(adminName); - storageHandler.createVolume(createVolumeArgs); - - BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs); - bucketArgs.setStorageType(StorageType.DISK); - storageHandler.createBucket(bucketArgs); - - String dataString1 = RandomStringUtils.randomAscii(100); - String dataString2 = RandomStringUtils.randomAscii(500); - KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs); - keyArgs.setSize(500); - - try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) { - // first write will write succeed - outputStream.write(dataString1.getBytes()); - // second write - exception.expect(IOException.class); - exception.expectMessage( - "Can not write 500 bytes with only 400 byte space"); - outputStream.write(dataString2.getBytes()); - } - } - - @Test - public void testPartialRead() throws Exception { - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - String keyName = "key" + RandomStringUtils.randomNumeric(5); - - VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs); - createVolumeArgs.setUserName(userName); - createVolumeArgs.setAdminName(adminName); - storageHandler.createVolume(createVolumeArgs); - - BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs); - bucketArgs.setStorageType(StorageType.DISK); - storageHandler.createBucket(bucketArgs); - - String dataString = RandomStringUtils.randomAscii(500); - KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs); - keyArgs.setSize(500); - keyArgs.setUserName(userName); - - try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) { - outputStream.write(dataString.getBytes()); - } - - byte[] data = new byte[600]; - try (InputStream inputStream = storageHandler.newKeyReader(keyArgs)) { - int readLen = inputStream.read(data, 0, 340); - assertEquals(340, readLen); - assertEquals(dataString.substring(0, 340), - new String(data).substring(0, 340)); - - readLen = inputStream.read(data, 340, 260); - assertEquals(160, readLen); - assertEquals(dataString, new String(data).substring(0, 500)); - - readLen = inputStream.read(data, 500, 1); - assertEquals(-1, readLen); - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java index 3b6389b8c82..d7e3a6f51c5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java @@ -19,26 +19,23 @@ import java.util.UUID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; 
import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneTestUtils; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.IOzoneObj; import org.apache.hadoop.ozone.security.acl.RequestContext; -import org.apache.hadoop.ozone.web.handlers.BucketArgs; -import org.apache.hadoop.ozone.web.handlers.KeyArgs; -import org.apache.hadoop.ozone.web.handlers.UserArgs; -import org.apache.hadoop.ozone.web.handlers.VolumeArgs; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.request.OzoneQuota; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.commons.lang3.RandomStringUtils; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; import org.junit.AfterClass; import static org.junit.Assert.assertTrue; @@ -52,9 +49,8 @@ */ public class TestOmAcls { + private static boolean aclAllow = true; private static MiniOzoneCluster cluster = null; - private static StorageHandler storageHandler; - private static UserArgs userArgs; private static OMMetrics omMetrics; private static OzoneConfiguration conf; private static String clusterId; @@ -80,15 +76,13 @@ public static void init() throws Exception { conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2); conf.setClass(OZONE_ACL_AUTHORIZER_CLASS, OzoneAccessAuthorizerTest.class, IAccessAuthorizer.class); + conf.setStrings(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD); cluster = MiniOzoneCluster.newBuilder(conf) .setClusterId(clusterId) .setScmId(scmId) .setOmId(omId) .build(); cluster.waitForClusterToBeReady(); - storageHandler = new ObjectStoreHandler(conf).getStorageHandler(); - userArgs = new UserArgs(null, OzoneUtils.getRequestID(), - null, null, null, null); omMetrics = cluster.getOzoneManager().getMetrics(); logCapturer = GenericTestUtils.LogCapturer.captureLogs(OzoneManager.getLogger()); @@ -104,65 +98,54 @@ public static void shutdown() { } } - /** * Tests the OM Initialization. 
*/ - @Test - public void testOMAclsPermissionDenied() throws Exception { - String user0 = "testListVolumes-user-0"; - String adminUser = "testListVolumes-admin"; - final VolumeArgs createVolumeArgs; - int i = 100; - String user0VolName = "Vol-" + user0 + "-" + i; - createVolumeArgs = new VolumeArgs(user0VolName, userArgs); - createVolumeArgs.setUserName(user0); - createVolumeArgs.setAdminName(adminUser); - createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB)); - logCapturer.clearOutput(); - OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED, - () -> storageHandler.createVolume(createVolumeArgs)); - assertTrue(logCapturer.getOutput().contains("Only admin users are " + - "authorized to create Ozone")); - BucketArgs bucketArgs = new BucketArgs("bucket1", createVolumeArgs); - bucketArgs.setStorageType(StorageType.DISK); + @Test + public void testBucketCreationPermissionDenied() throws Exception { + + TestOmAcls.aclAllow = true; + + String volumeName = RandomStringUtils.randomAlphabetic(5).toLowerCase(); + String bucketName = RandomStringUtils.randomAlphabetic(5).toLowerCase(); + cluster.getClient().getObjectStore().createVolume(volumeName); + OzoneVolume volume = + cluster.getClient().getObjectStore().getVolume(volumeName); + + TestOmAcls.aclAllow = false; OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED, - () -> storageHandler.createBucket(bucketArgs)); - assertTrue(logCapturer.getOutput().contains("Only admin users are" + - " authorized to create Ozone")); + () -> volume.createBucket(bucketName)); + + assertTrue(logCapturer.getOutput() + .contains("doesn't have CREATE permission to access volume")); } @Test public void testFailureInKeyOp() throws Exception { final VolumeArgs createVolumeArgs; - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - createVolumeArgs = new VolumeArgs(userName, userArgs); - createVolumeArgs.setUserName(userName); - createVolumeArgs.setAdminName(adminName); - createVolumeArgs.setQuota(new OzoneQuota(100, OzoneQuota.Units.GB)); - BucketArgs bucketArgs = new BucketArgs("bucket1", createVolumeArgs); - bucketArgs.setStorageType(StorageType.DISK); + + TestOmAcls.aclAllow = true; + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster); logCapturer.clearOutput(); - // write a key without specifying size at all - String keyName = "testKey"; - KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs); + TestOmAcls.aclAllow = false; + OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED, - () -> storageHandler.newKeyWriter(keyArgs)); + () -> TestDataUtil.createKey(bucket, "testKey", "testcontent")); assertTrue(logCapturer.getOutput().contains("doesn't have WRITE " + "permission to access key")); } -} -/** - * Test implementation to negative case. - */ -class OzoneAccessAuthorizerTest implements IAccessAuthorizer { + /** + * Test implementation for the negative case.
+ */ + static class OzoneAccessAuthorizerTest implements IAccessAuthorizer { - @Override - public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context) { - return false; + @Override + public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context) { + return TestOmAcls.aclAllow; + } } + } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java index 0db78c75e14..ff1cf039e0c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java @@ -16,50 +16,39 @@ */ package org.apache.hadoop.ozone.om; -import org.apache.commons.lang3.RandomStringUtils; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.web.handlers.BucketArgs; -import org.apache.hadoop.ozone.web.handlers.KeyArgs; -import org.apache.hadoop.ozone.web.handlers.UserArgs; -import org.apache.hadoop.ozone.web.handlers.VolumeArgs; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.security.UserGroupInformation; + +import org.apache.commons.lang3.RandomStringUtils; import org.junit.AfterClass; +import org.junit.Assert; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; -import org.junit.Assert; import org.junit.rules.ExpectedException; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - /** * This class tests the versioning of blocks from OM side. 
*/ public class TestOmBlockVersioning { private static MiniOzoneCluster cluster = null; - private static UserArgs userArgs; private static OzoneConfiguration conf; private static OzoneManager ozoneManager; - private static StorageHandler storageHandler; @Rule public ExpectedException exception = ExpectedException.none(); @@ -76,9 +65,6 @@ public static void init() throws Exception { conf = new OzoneConfiguration(); cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); - storageHandler = new ObjectStoreHandler(conf).getStorageHandler(); - userArgs = new UserArgs(null, OzoneUtils.getRequestID(), - null, null, null, null); ozoneManager = cluster.getOzoneManager(); } @@ -101,14 +87,7 @@ public void testAllocateCommit() throws Exception { String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); String keyName = "key" + RandomStringUtils.randomNumeric(5); - VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs); - createVolumeArgs.setUserName(userName); - createVolumeArgs.setAdminName(adminName); - storageHandler.createVolume(createVolumeArgs); - - BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs); - bucketArgs.setStorageType(StorageType.DISK); - storageHandler.createBucket(bucketArgs); + TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName); OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) @@ -200,14 +179,8 @@ public void testReadLatestVersion() throws Exception { String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); String keyName = "key" + RandomStringUtils.randomNumeric(5); - VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs); - createVolumeArgs.setUserName(userName); - createVolumeArgs.setAdminName(adminName); - storageHandler.createVolume(createVolumeArgs); - - BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs); - bucketArgs.setStorageType(StorageType.DISK); - storageHandler.createBucket(bucketArgs); + OzoneBucket bucket = + TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName); OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) @@ -218,48 +191,30 @@ public void testReadLatestVersion() throws Exception { .build(); String dataString = RandomStringUtils.randomAlphabetic(100); - KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs); - keyArgs.setUserName(userName); - // this write will create 1st version with one block - try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) { - stream.write(dataString.getBytes()); - } - byte[] data = new byte[dataString.length()]; - try (InputStream in = storageHandler.newKeyReader(keyArgs)) { - in.read(data); - } + + TestDataUtil.createKey(bucket, keyName, dataString); + assertEquals(dataString, TestDataUtil.getKey(bucket, keyName)); OmKeyInfo keyInfo = ozoneManager.lookupKey(omKeyArgs); - assertEquals(dataString, DFSUtil.bytes2String(data)); assertEquals(0, keyInfo.getLatestVersionLocations().getVersion()); assertEquals(1, keyInfo.getLatestVersionLocations().getLocationList().size()); // this write will create 2nd version, 2nd version will contain block from // version 1, and add a new block - dataString = RandomStringUtils.randomAlphabetic(10); - data = new byte[dataString.length()]; - try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) { - stream.write(dataString.getBytes()); - } - try (InputStream in = storageHandler.newKeyReader(keyArgs)) { - in.read(data); - } + TestDataUtil.createKey(bucket, keyName, dataString); + + 
keyInfo = ozoneManager.lookupKey(omKeyArgs); - assertEquals(dataString, DFSUtil.bytes2String(data)); + assertEquals(dataString, TestDataUtil.getKey(bucket, keyName)); assertEquals(1, keyInfo.getLatestVersionLocations().getVersion()); assertEquals(2, keyInfo.getLatestVersionLocations().getLocationList().size()); dataString = RandomStringUtils.randomAlphabetic(200); - data = new byte[dataString.length()]; - try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) { - stream.write(dataString.getBytes()); - } - try (InputStream in = storageHandler.newKeyReader(keyArgs)) { - in.read(data); - } + TestDataUtil.createKey(bucket, keyName, dataString); + keyInfo = ozoneManager.lookupKey(omKeyArgs); - assertEquals(dataString, DFSUtil.bytes2String(data)); + assertEquals(dataString, TestDataUtil.getKey(bucket, keyName)); assertEquals(2, keyInfo.getLatestVersionLocations().getVersion()); assertEquals(3, keyInfo.getLatestVersionLocations().getLocationList().size()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java index 4b736c9436d..de42fdccc03 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java @@ -16,14 +16,14 @@ */ package org.apache.hadoop.ozone.om; +import java.io.IOException; +import java.util.UUID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.web.handlers.UserArgs; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.security.authentication.client.AuthenticationException; + +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -31,18 +31,11 @@ import org.junit.Test; import org.junit.rules.ExpectedException; -import java.io.IOException; -import java.util.UUID; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; - /** * Test Ozone Manager Init. */ public class TestOmInit { private static MiniOzoneCluster cluster = null; - private static StorageHandler storageHandler; - private static UserArgs userArgs; private static OMMetrics omMetrics; private static OzoneConfiguration conf; private static String clusterId; @@ -72,9 +65,6 @@ public static void init() throws Exception { .setOmId(omId) .build(); cluster.waitForClusterToBeReady(); - storageHandler = new ObjectStoreHandler(conf).getStorageHandler(); - userArgs = new UserArgs(null, OzoneUtils.getRequestID(), - null, null, null, null); omMetrics = cluster.getOzoneManager().getMetrics(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java deleted file mode 100644 index 1fb3b001ff2..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java +++ /dev/null @@ -1,1449 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.net.InetSocketAddress; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.text.ParseException; -import java.util.List; -import java.util.Random; -import java.util.Set; -import java.util.UUID; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import org.apache.commons.codec.binary.StringUtils; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.OzoneTestUtils; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; -import org.apache.hadoop.ozone.om.helpers.ServiceInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.ozone.util.OzoneVersionInfo; -import org.apache.hadoop.ozone.web.handlers.BucketArgs; -import org.apache.hadoop.ozone.web.handlers.KeyArgs; -import org.apache.hadoop.ozone.web.handlers.ListArgs; -import org.apache.hadoop.ozone.web.handlers.UserArgs; -import org.apache.hadoop.ozone.web.handlers.VolumeArgs; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.request.OzoneQuota; -import org.apache.hadoop.ozone.web.response.BucketInfo; -import org.apache.hadoop.ozone.web.response.KeyInfo; -import org.apache.hadoop.ozone.web.response.ListBuckets; -import org.apache.hadoop.ozone.web.response.ListKeys; -import org.apache.hadoop.ozone.web.response.ListVolumes; -import org.apache.hadoop.ozone.web.response.VolumeInfo; -import 
org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.utils.db.DBUpdatesWrapper; -import org.apache.hadoop.utils.db.RDBStore; -import org.apache.hadoop.utils.db.Table; -import org.apache.hadoop.utils.db.Table.KeyValue; -import org.apache.hadoop.utils.db.TableIterator; - -import org.apache.commons.lang3.RandomStringUtils; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.rocksdb.RocksDB; - -/** - * Test Ozone Manager operation in distributed handler scenario. - */ -public class TestOzoneManager { - private MiniOzoneCluster cluster = null; - private StorageHandler storageHandler; - private UserArgs userArgs; - private OMMetrics omMetrics; - private OzoneConfiguration conf; - private String clusterId; - private String scmId; - private String omId; - - @Rule - public Timeout timeout = new Timeout(60000); - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); - conf.setBoolean(OZONE_ACL_ENABLED, true); - conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2); - conf.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD); - cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOmId(omId) - .build(); - cluster.waitForClusterToBeReady(); - storageHandler = new ObjectStoreHandler(conf).getStorageHandler(); - userArgs = new UserArgs(null, OzoneUtils.getRequestID(), - null, null, null, null); - omMetrics = cluster.getOzoneManager().getMetrics(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - // Create a volume and test its attribute after creating them - @Test - public void testCreateVolume() throws IOException, OzoneException { - long volumeCreateFailCount = omMetrics.getNumVolumeCreateFails(); - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - - VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs); - createVolumeArgs.setUserName(userName); - createVolumeArgs.setAdminName(adminName); - storageHandler.createVolume(createVolumeArgs); - - VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs); - VolumeInfo retVolumeinfo = storageHandler.getVolumeInfo(getVolumeArgs); - Assert.assertTrue(retVolumeinfo.getVolumeName().equals(volumeName)); - Assert.assertTrue(retVolumeinfo.getOwner().getName().equals(userName)); - Assert.assertEquals(volumeCreateFailCount, - omMetrics.getNumVolumeCreateFails()); - } - - // Create a volume and modify the volume owner and then test its attributes - @Test - public void testChangeVolumeOwner() throws IOException, OzoneException { - long volumeCreateFailCount = omMetrics.getNumVolumeCreateFails(); - long volumeInfoFailCount = omMetrics.getNumVolumeInfoFails(); - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - - VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs); - createVolumeArgs.setUserName(userName); - createVolumeArgs.setAdminName(adminName); - storageHandler.createVolume(createVolumeArgs); - - String newUserName = "user" + RandomStringUtils.randomNumeric(5); - createVolumeArgs.setUserName(newUserName); - storageHandler.setVolumeOwner(createVolumeArgs); - - VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs); - VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs); - - Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName)); - Assert.assertFalse(retVolumeInfo.getOwner().getName().equals(userName)); - Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(newUserName)); - Assert.assertEquals(volumeCreateFailCount, - omMetrics.getNumVolumeCreateFails()); - Assert.assertEquals(volumeInfoFailCount, - omMetrics.getNumVolumeInfoFails()); - } - - // Create a volume and modify the volume owner and then test its attributes - @Test - public void testChangeVolumeQuota() throws IOException, OzoneException { - long 
-  // Create a volume and modify the volume owner and then test its attributes
-  @Test
-  public void testChangeVolumeQuota() throws IOException, OzoneException {
-    long numVolumeCreateFail = omMetrics.getNumVolumeCreateFails();
-    long numVolumeInfoFail = omMetrics.getNumVolumeInfoFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    Random rand = new Random();
-
-    // Create a new volume with a quota
-    OzoneQuota createQuota =
-        new OzoneQuota(rand.nextInt(100), OzoneQuota.Units.GB);
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    createVolumeArgs.setQuota(createQuota);
-    storageHandler.createVolume(createVolumeArgs);
-
-    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertEquals(createQuota.sizeInBytes(),
-        retVolumeInfo.getQuota().sizeInBytes());
-
-    // Set a new quota and test it
-    OzoneQuota setQuota =
-        new OzoneQuota(rand.nextInt(100), OzoneQuota.Units.GB);
-    createVolumeArgs.setQuota(setQuota);
-    storageHandler.setVolumeQuota(createVolumeArgs, false);
-    getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertEquals(setQuota.sizeInBytes(),
-        retVolumeInfo.getQuota().sizeInBytes());
-
-    // Remove the quota and test it again
-    storageHandler.setVolumeQuota(createVolumeArgs, true);
-    getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertEquals(OzoneConsts.MAX_QUOTA_IN_BYTES,
-        retVolumeInfo.getQuota().sizeInBytes());
-    Assert.assertEquals(numVolumeCreateFail,
-        omMetrics.getNumVolumeCreateFails());
-    Assert.assertEquals(numVolumeInfoFail,
-        omMetrics.getNumVolumeInfoFails());
-  }
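The quota assertions above rest on two behaviors of the (deleted) org.apache.hadoop.ozone.web.request.OzoneQuota type: sizeInBytes() scales the raw size by its unit, and removing a quota resets the volume to the OzoneConsts.MAX_QUOTA_IN_BYTES sentinel. A hedged sketch, shown only to make the expected values concrete:

    // Sketch; OzoneQuota is removed by this very patch, so this illustrates
    // how the deleted type behaved, not a recommendation to use it.
    static void quotaSketch() {
      OzoneQuota tenGb = new OzoneQuota(10, OzoneQuota.Units.GB);
      long bytes = tenGb.sizeInBytes();        // raw size scaled by the unit
      // Clearing a volume quota resets it to the "unlimited" sentinel:
      long unlimited = OzoneConsts.MAX_QUOTA_IN_BYTES;
      System.out.println(bytes + " vs " + unlimited);
    }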
-  // Create a volume and then delete it and then check for deletion
-  @Test
-  public void testDeleteVolume() throws IOException, OzoneException {
-    long volumeCreateFailCount = omMetrics.getNumVolumeCreateFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String volumeName1 = volumeName + "_A";
-    String volumeName2 = volumeName + "_AA";
-    VolumeArgs volumeArgs = null;
-    VolumeInfo volumeInfo = null;
-
-    // Create 2 empty volumes with same prefix.
-    volumeArgs = new VolumeArgs(volumeName1, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-
-    volumeArgs = new VolumeArgs(volumeName2, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-
-    volumeArgs = new VolumeArgs(volumeName1, userArgs);
-    volumeInfo = storageHandler.getVolumeInfo(volumeArgs);
-    Assert.assertTrue(volumeInfo.getVolumeName().equals(volumeName1));
-    Assert.assertTrue(volumeInfo.getOwner().getName().equals(userName));
-    Assert.assertEquals(volumeCreateFailCount,
-        omMetrics.getNumVolumeCreateFails());
-
-    // Volume with _A should be able to delete as it is empty.
-    storageHandler.deleteVolume(volumeArgs);
-
-    // Make sure volume with _AA suffix still exists.
-    volumeArgs = new VolumeArgs(volumeName2, userArgs);
-    volumeInfo = storageHandler.getVolumeInfo(volumeArgs);
-    Assert.assertTrue(volumeInfo.getVolumeName().equals(volumeName2));
-
-    // Make sure volume with _A suffix is successfully deleted.
-    try {
-      volumeArgs = new VolumeArgs(volumeName1, userArgs);
-      storageHandler.getVolumeInfo(volumeArgs);
-      Assert.fail("Volume is not deleted");
-    } catch (OMException ex) {
-      Assert.assertEquals(ResultCodes.VOLUME_NOT_FOUND, ex.getResult());
-    }
-    //delete the _AA volume, too
-    storageHandler.deleteVolume(new VolumeArgs(volumeName2, userArgs));
-
-    //Make sure there is no volume information for the specific user
-    OMMetadataManager metadataManager =
-        cluster.getOzoneManager().getMetadataManager();
-
-    String userKey = metadataManager.getUserKey(userName);
-    VolumeList volumes = metadataManager.getUserTable().get(userKey);
-
-    //that was the last volume of the user, shouldn't be any record here
-    Assert.assertNull(volumes);
-
-
-  }
-
-  // Create a volume and a bucket inside the volume,
-  // then delete it and then check for deletion failure
-  @Test
-  public void testFailedDeleteVolume() throws IOException, OzoneException {
-    long numVolumeCreateFails = omMetrics.getNumVolumeCreateFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(userName));
-    Assert.assertEquals(numVolumeCreateFails,
-        omMetrics.getNumVolumeCreateFails());
-
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    try {
-      storageHandler.deleteVolume(createVolumeArgs);
-      Assert.fail("Expecting deletion should fail "
-          + "because volume is not empty");
-    } catch (OMException ex) {
-      Assert.assertEquals(ResultCodes.VOLUME_NOT_EMPTY, ex.getResult());
-    }
-    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(userName));
-  }
-
-  // Create a volume and test Volume access for a different user
-  @Test
-  public void testAccessVolume() throws IOException, OzoneException {
-    String userName = UserGroupInformation.getCurrentUser().getUserName();
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String[] groupName =
-        {"group" + RandomStringUtils.randomNumeric(5)};
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    createVolumeArgs.setGroups(groupName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    OzoneAcl userAcl = new OzoneAcl(ACLIdentityType.USER, userName,
-        ACLType.READ, ACCESS);
-    Assert.assertTrue(storageHandler.checkVolumeAccess(volumeName, userAcl));
-    OzoneAcl group = new OzoneAcl(ACLIdentityType.GROUP, groupName[0],
-        ACLType.READ, ACCESS);
-    Assert.assertTrue(storageHandler.checkVolumeAccess(volumeName, group));
-
-    // Create a different user and access should fail
-    String falseUserName = "user" + RandomStringUtils.randomNumeric(5);
-    OzoneAcl falseUserAcl =
-        new OzoneAcl(ACLIdentityType.USER, falseUserName,
-            ACLType.ALL, ACCESS);
-    Assert.assertFalse(storageHandler
-        .checkVolumeAccess(volumeName, falseUserAcl));
-    // Checking access with user name and Group Type should fail
-    OzoneAcl falseGroupAcl = new OzoneAcl(ACLIdentityType.GROUP, userName,
-        ACLType.ALL, ACCESS);
-    Assert.assertFalse(storageHandler
-        .checkVolumeAccess(volumeName, falseGroupAcl));
-
-    // Access for acl type world should also fail
-    OzoneAcl worldAcl =
-        new OzoneAcl(ACLIdentityType.WORLD, "", ACLType.READ, ACCESS);
-    Assert.assertFalse(storageHandler.checkVolumeAccess(volumeName, worldAcl));
-
-    Assert.assertEquals(0, omMetrics.getNumVolumeCheckAccessFails());
-    Assert.assertEquals(0, omMetrics.getNumVolumeCreateFails());
-  }
-
-  @Test
-  public void testCreateBucket() throws IOException, OzoneException {
-    long numVolumeCreateFail = omMetrics.getNumVolumeCreateFails();
-    long numBucketCreateFail = omMetrics.getNumBucketCreateFails();
-    long numBucketInfoFail = omMetrics.getNumBucketInfoFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
-        userArgs);
-    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
-    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
-    Assert.assertEquals(numVolumeCreateFail,
-        omMetrics.getNumVolumeCreateFails());
-    Assert.assertEquals(numBucketCreateFail,
-        omMetrics.getNumBucketCreateFails());
-    Assert.assertEquals(numBucketInfoFail,
-        omMetrics.getNumBucketInfoFails());
-  }
-
-  @Test
-  public void testDeleteBucket() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
-        userArgs);
-    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
-    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
-    storageHandler.deleteBucket(bucketArgs);
-
-    OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND,
-        () -> storageHandler.getBucketInfo(getBucketArgs));
-  }
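As with volumes, the bucket lifecycle exercised above maps directly onto the RPC client that survives this patch. A sketch under the same assumptions as the earlier volume example (names are placeholders):

    // Sketch: bucket create/verify/delete through an ObjectStore handle.
    static void bucketRoundTrip(ObjectStore store) throws IOException {
      OzoneVolume volume = store.getVolume("volume1");
      volume.createBucket("bucket1");
      OzoneBucket bucket = volume.getBucket("bucket1");
      if (!"bucket1".equals(bucket.getName())) {
        throw new IllegalStateException("unexpected bucket name");
      }
      volume.deleteBucket("bucket1");
    }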
-  @Test
-  public void testDeleteNonExistingBucket() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
-        userArgs);
-    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
-    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
-    BucketArgs newBucketArgs = new BucketArgs(
-        volumeName, bucketName + "_invalid", userArgs);
-    OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND,
-        () -> storageHandler.deleteBucket(newBucketArgs));
-  }
-
-
-  @Test
-  public void testDeleteNonEmptyBucket() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
-        userArgs);
-    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
-    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
-    String dataString = RandomStringUtils.randomAscii(100);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(100);
-    keyArgs.setUserName(userName);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_EMPTY,
-        () -> storageHandler.deleteBucket(bucketArgs));
-
-  }
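Several of these tests lean on OzoneTestUtils.expectOmException(code, callable). Its contract is easy to restate; a hypothetical re-implementation of the helper, to make the pattern explicit (the real OzoneTestUtils may differ in details):

    // Hypothetical sketch of the expect-exception helper used above.
    interface VoidCallable {
      void call() throws Exception;
    }

    static void expectOmException(OMException.ResultCodes code,
        VoidCallable eval) throws Exception {
      try {
        eval.call();
        Assert.fail("OMException with result code " + code + " was expected");
      } catch (OMException ex) {
        Assert.assertEquals(code, ex.getResult());
      }
    }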
-  /**
-   * Basic test of both putKey and getKey from OM, as one can not be tested
-   * without the other.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testGetKeyWriterReader() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyAllocates = omMetrics.getNumKeyAllocates();
-    long numKeyLookups = omMetrics.getNumKeyLookups();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString = RandomStringUtils.randomAscii(100);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(100);
-    keyArgs.setUserName(userName);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    Assert.assertEquals(1 + numKeyAllocates, omMetrics.getNumKeyAllocates());
-
-    byte[] data = new byte[dataString.length()];
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
-    Assert.assertEquals(dataString, DFSUtil.bytes2String(data));
-    Assert.assertEquals(1 + numKeyLookups, omMetrics.getNumKeyLookups());
-  }
-
-  /**
-   * Test write the same key twice, the second write should fail, as currently
-   * key overwrite is not supported.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testKeyOverwrite() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyAllocateFails = omMetrics.getNumKeyAllocateFails();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(100);
-    keyArgs.setUserName(userName);
-    String dataString = RandomStringUtils.randomAscii(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-
-    // We allow the key overwrite to be successful. Please note : Till
-    // HDFS-11922 is fixed this causes a data block leak on the data node side.
-    // That is this overwrite only overwrites the keys on OM. We need to
-    // garbage collect those blocks from datanode.
-    KeyArgs keyArgs2 = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs2.setUserName(userName);
-    storageHandler.newKeyWriter(keyArgs2);
-    Assert
-        .assertEquals(numKeyAllocateFails, omMetrics.getNumKeyAllocateFails());
-  }
-  /**
-   * Test get a non-exiting key.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testGetNonExistKey() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyLookupFails = omMetrics.getNumKeyLookupFails();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    // try to get the key, should fail as it hasn't been created
-    OzoneTestUtils.expectOmException(KEY_NOT_FOUND,
-        () -> storageHandler.newKeyReader(keyArgs));
-    Assert.assertEquals(1 + numKeyLookupFails,
-        omMetrics.getNumKeyLookupFails());
-  }
-
-  /**
-   * Test delete keys for om.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testDeleteKey() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyDeletes = omMetrics.getNumKeyDeletes();
-    long numKeyDeleteFails = omMetrics.getNumKeyDeletesFails();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    keyArgs.setSize(100);
-    String dataString = RandomStringUtils.randomAscii(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-
-    storageHandler.deleteKey(keyArgs);
-    Assert.assertEquals(1 + numKeyDeletes, omMetrics.getNumKeyDeletes());
-
-    // Make sure the deleted key has been moved to the deleted table.
-    OMMetadataManager manager = cluster.getOzoneManager().
-        getMetadataManager();
-    try (TableIterator<String, ? extends Table.KeyValue<String, ?>> iter =
-        manager.getDeletedTable().iterator()) {
-      iter.seekToFirst();
-      Table.KeyValue kv = iter.next();
-      Assert.assertNotNull(kv);
-    }
-
-    // Delete the key again to test deleting non-existing key.
-    OzoneTestUtils.expectOmException(KEY_NOT_FOUND,
-        () -> storageHandler.deleteKey(keyArgs));
-    Assert.assertEquals(1 + numKeyDeleteFails,
-        omMetrics.getNumKeyDeletesFails());
-  }
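The deleted-table check above uses the OM metadata Table/TableIterator API. For clarity, the iteration pattern is roughly as follows (the generic parameters are assumptions, since the exact value type of the deleted table is not shown in this hunk):

    // Illustrative sketch: walk every key in an OM metadata table.
    static void dumpTableKeys(Table<String, ?> table) throws IOException {
      try (TableIterator<String, ? extends Table.KeyValue<String, ?>> it =
          table.iterator()) {
        it.seekToFirst();
        while (it.hasNext()) {
          System.out.println(it.next().getKey());
        }
      }
    }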
-  /**
-   * Test rename key for om.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testRenameKey() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyRenames = omMetrics.getNumKeyRenames();
-    long numKeyRenameFails = omMetrics.getNumKeyRenameFails();
-    int testRenameFails = 0;
-    int testRenames = 0;
-    OMException omException = null;
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    keyArgs.setSize(100);
-    String toKeyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    // Rename from non-existent key should fail
-    try {
-      testRenames++;
-      storageHandler.renameKey(keyArgs, toKeyName);
-    } catch (OMException e) {
-      testRenameFails++;
-      omException = e;
-    }
-    Assert.assertEquals(KEY_NOT_FOUND, omException.getResult());
-
-    // Write the contents of the key to be renamed
-    String dataString = RandomStringUtils.randomAscii(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-
-    // Rename the key
-    toKeyName = "key" + RandomStringUtils.randomNumeric(5);
-    testRenames++;
-    storageHandler.renameKey(keyArgs, toKeyName);
-    Assert.assertEquals(numKeyRenames + testRenames,
-        omMetrics.getNumKeyRenames());
-    Assert.assertEquals(numKeyRenameFails + testRenameFails,
-        omMetrics.getNumKeyRenameFails());
-
-    // Try to get the key, should fail as it has been renamed
-    try {
-      storageHandler.newKeyReader(keyArgs);
-    } catch (OMException e) {
-      omException = e;
-    }
-    Assert.assertEquals(KEY_NOT_FOUND, omException.getResult());
-
-    // Verify the contents of the renamed key
-    keyArgs = new KeyArgs(toKeyName, bucketArgs);
-    InputStream in = storageHandler.newKeyReader(keyArgs);
-    byte[] b = new byte[dataString.getBytes().length];
-    in.read(b);
-    Assert.assertEquals(new String(b), dataString);
-
-    // Rewrite the renamed key. Rename to key which already exists should fail.
-    keyArgs = new KeyArgs(keyName, bucketArgs);
-    keyArgs.setSize(100);
-    dataString = RandomStringUtils.randomAscii(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-      stream.close();
-      testRenames++;
-      storageHandler.renameKey(keyArgs, toKeyName);
-    } catch (OMException e) {
-      testRenameFails++;
-      omException = e;
-    }
-    Assert.assertEquals(ResultCodes.KEY_ALREADY_EXISTS,
-        omException.getResult());
-
-    // Rename to empty string should fail
-    toKeyName = "";
-    try {
-      testRenames++;
-      storageHandler.renameKey(keyArgs, toKeyName);
-    } catch (OMException e) {
-      testRenameFails++;
-      omException = e;
-    }
-    Assert.assertEquals(ResultCodes.INVALID_KEY_NAME, omException.getResult());
-
-    // Rename from empty string should fail
-    keyArgs = new KeyArgs("", bucketArgs);
-    toKeyName = "key" + RandomStringUtils.randomNumeric(5);
-    try {
-      testRenames++;
-      storageHandler.renameKey(keyArgs, toKeyName);
-    } catch (OMException e) {
-      testRenameFails++;
-      omException = e;
-    }
-    Assert.assertEquals(ResultCodes.INVALID_KEY_NAME, omException.getResult());
-
-    Assert.assertEquals(numKeyRenames + testRenames,
-        omMetrics.getNumKeyRenames());
-    Assert.assertEquals(numKeyRenameFails + testRenameFails,
-        omMetrics.getNumKeyRenameFails());
-  }
-
-  @Test
-  public void testListBuckets() throws IOException, OzoneException {
-    ListBuckets result = null;
-    ListArgs listBucketArgs = null;
-
-    // Create volume - volA.
-    final String volAname = "volA";
-    VolumeArgs volAArgs = new VolumeArgs(volAname, userArgs);
-    volAArgs.setUserName("userA");
-    volAArgs.setAdminName("adminA");
-    storageHandler.createVolume(volAArgs);
-
-    // Create 20 buckets in volA for tests.
-    for (int i=0; i<10; i++) {
-      // Create "/volA/aBucket_0" to "/volA/aBucket_9" buckets in volA volume.
-      BucketArgs aBuckets = new BucketArgs(volAname,
-          "aBucket_" + i, userArgs);
-      if(i % 3 == 0) {
-        aBuckets.setStorageType(StorageType.ARCHIVE);
-      } else {
-        aBuckets.setStorageType(StorageType.DISK);
-      }
-      storageHandler.createBucket(aBuckets);
-
-      // Create "/volA/bBucket_0" to "/volA/bBucket_9" buckets in volA volume.
-      BucketArgs bBuckets = new BucketArgs(volAname,
-          "bBucket_" + i, userArgs);
-      if(i % 3 == 0) {
-        bBuckets.setStorageType(StorageType.RAM_DISK);
-      } else {
-        bBuckets.setStorageType(StorageType.SSD);
-      }
-      storageHandler.createBucket(bBuckets);
-    }
-
-    VolumeArgs volArgs = new VolumeArgs(volAname, userArgs);
-
-    // List all buckets in volA.
-    listBucketArgs = new ListArgs(volArgs, null, 100, null);
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(20, result.getBuckets().size());
-    List<BucketInfo> archiveBuckets = result.getBuckets().stream()
-        .filter(item -> item.getStorageType() == StorageType.ARCHIVE)
-        .collect(Collectors.toList());
-    Assert.assertEquals(4, archiveBuckets.size());
-
-    // List buckets with prefix "aBucket".
-    listBucketArgs = new ListArgs(volArgs, "aBucket", 100, null);
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(10, result.getBuckets().size());
-    Assert.assertTrue(result.getBuckets().stream()
-        .allMatch(entry -> entry.getBucketName().startsWith("aBucket")));
-
-    // List a certain number of buckets.
-    listBucketArgs = new ListArgs(volArgs, null, 3, null);
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(3, result.getBuckets().size());
-    Assert.assertEquals("aBucket_0",
-        result.getBuckets().get(0).getBucketName());
-    Assert.assertEquals("aBucket_1",
-        result.getBuckets().get(1).getBucketName());
-    Assert.assertEquals("aBucket_2",
-        result.getBuckets().get(2).getBucketName());
-
-    // List a certain number of buckets from the startKey.
-    listBucketArgs = new ListArgs(volArgs, null, 2, "bBucket_3");
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(2, result.getBuckets().size());
-    Assert.assertEquals("bBucket_4",
-        result.getBuckets().get(0).getBucketName());
-    Assert.assertEquals("bBucket_5",
-        result.getBuckets().get(1).getBucketName());
-
-    // Provide an invalid bucket name as start key.
-    listBucketArgs = new ListArgs(volArgs, null, 100, "unknown_bucket_name");
-    ListBuckets buckets = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(buckets.getBuckets().size(), 0);
-
-    // Use all arguments.
-    listBucketArgs = new ListArgs(volArgs, "b", 5, "bBucket_7");
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(2, result.getBuckets().size());
-    Assert.assertEquals("bBucket_8",
-        result.getBuckets().get(0).getBucketName());
-    Assert.assertEquals("bBucket_9",
-        result.getBuckets().get(1).getBucketName());
-
-    // Provide an invalid maxKeys argument.
-    try {
-      listBucketArgs = new ListArgs(volArgs, null, -1, null);
-      storageHandler.listBuckets(listBucketArgs);
-      Assert.fail("Expecting an error when the given"
-          + " maxKeys argument is invalid.");
-    } catch (Exception e) {
-      Assert.assertTrue(e.getMessage()
-          .contains(String.format("the value must be in range (0, %d]",
-              OzoneConsts.MAX_LISTBUCKETS_SIZE)));
-    }
-
-    // Provide an invalid volume name.
-    VolumeArgs invalidVolArgs = new VolumeArgs("invalid_name", userArgs);
-    try {
-      listBucketArgs = new ListArgs(invalidVolArgs, null, 100, null);
-      storageHandler.listBuckets(listBucketArgs);
-      Assert.fail("Expecting an error when the given volume name is invalid.");
-    } catch (OMException e) {
-      Assert.assertEquals(VOLUME_NOT_FOUND, e.getResult());
-    }
-  }
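The startKey asserted on above is exclusive: listing from "bBucket_3" returns bBucket_4 onward. That makes the usual paging loop straightforward; a sketch against the (now-deleted) ListArgs API, with volArgs and storageHandler as in the test above:

    // Sketch: page through buckets three at a time; each round passes the
    // last name seen, which the server treats as an exclusive start key.
    String startKey = null;
    while (true) {
      ListArgs page = new ListArgs(volArgs, null, 3, startKey);
      List<BucketInfo> buckets = storageHandler.listBuckets(page).getBuckets();
      if (buckets.isEmpty()) {
        break;
      }
      startKey = buckets.get(buckets.size() - 1).getBucketName();
    }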
-  /**
-   * Test list keys.
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testListKeys() throws Exception {
-    ListKeys result = null;
-    ListArgs listKeyArgs = null;
-
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    // Write 20 keys in bucket.
-    int numKeys = 20;
-    String keyName = "Key";
-    KeyArgs keyArgs = null;
-    for (int i = 0; i < numKeys; i++) {
-      if (i % 2 == 0) {
-        // Create /volume/bucket/aKey[0,2,4,...,18] in bucket.
-        keyArgs = new KeyArgs("a" + keyName + i, bucketArgs);
-      } else {
-        // Create /volume/bucket/bKey[1,3,5,...,19] in bucket.
-        keyArgs = new KeyArgs("b" + keyName + i, bucketArgs);
-      }
-      keyArgs.setSize(4096);
-
-      // Just for testing list keys call, so no need to write real data.
-      OutputStream stream = storageHandler.newKeyWriter(keyArgs);
-      stream.close();
-    }
-
-    // List all keys in bucket.
-    bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    listKeyArgs = new ListArgs(bucketArgs, null, 100, null);
-    result = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(numKeys, result.getKeyList().size());
-
-    // List keys with prefix "aKey".
-    listKeyArgs = new ListArgs(bucketArgs, "aKey", 100, null);
-    result = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(numKeys / 2, result.getKeyList().size());
-    Assert.assertTrue(result.getKeyList().stream()
-        .allMatch(entry -> entry.getKeyName().startsWith("aKey")));
-
-    // List a certain number of keys.
-    listKeyArgs = new ListArgs(bucketArgs, null, 3, null);
-    result = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(3, result.getKeyList().size());
-    Assert.assertEquals("aKey0",
-        result.getKeyList().get(0).getKeyName());
-    Assert.assertEquals("aKey10",
-        result.getKeyList().get(1).getKeyName());
-    Assert.assertEquals("aKey12",
-        result.getKeyList().get(2).getKeyName());
-
-    // List a certain number of keys from the startKey.
-    listKeyArgs = new ListArgs(bucketArgs, null, 2, "bKey1");
-    result = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(2, result.getKeyList().size());
-    Assert.assertEquals("bKey11",
-        result.getKeyList().get(0).getKeyName());
-    Assert.assertEquals("bKey13",
-        result.getKeyList().get(1).getKeyName());
-
-    // Provide an invalid key name as start key.
-    listKeyArgs = new ListArgs(bucketArgs, null, 100, "invalid_start_key");
-    ListKeys keys = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(keys.getKeyList().size(), 0);
-
-    // Provide an invalid maxKeys argument.
-    try {
-      listKeyArgs = new ListArgs(bucketArgs, null, -1, null);
-      storageHandler.listBuckets(listKeyArgs);
-      Assert.fail("Expecting an error when the given"
-          + " maxKeys argument is invalid.");
-    } catch (Exception e) {
-      GenericTestUtils.assertExceptionContains(
-          String.format("the value must be in range (0, %d]",
-              OzoneConsts.MAX_LISTKEYS_SIZE), e);
-    }
-
-    OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND, () -> {
-      // Provide an invalid bucket name.
-      BucketArgs bucket = new BucketArgs("invalid_bucket", createVolumeArgs);
-      ListArgs ks = new ListArgs(bucket, null, numKeys, null);
-      storageHandler.listKeys(ks);
-    });
-  }
-
-  @Test
-  public void testListVolumes() throws IOException, OzoneException {
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    String user0 = ugi.getUserName();
-    String user1 = "testListVolumes-user-1";
-    String adminUser = "testListVolumes-admin";
-    ListArgs listVolumeArgs;
-    ListVolumes volumes;
-
-    // Create 10 volumes by user0 and user1
-    String[] user0vols = new String[10];
-    String[] user1vols = new String[10];
-    for (int i =0; i<10; i++) {
-      VolumeArgs createVolumeArgs;
-      String user0VolName = "Vol-" + user0 + "-" + i;
-      user0vols[i] = user0VolName;
-      createVolumeArgs = new VolumeArgs(user0VolName, userArgs);
-      createVolumeArgs.setUserName(user0);
-      createVolumeArgs.setAdminName(adminUser);
-      createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB));
-      storageHandler.createVolume(createVolumeArgs);
-
-      String user1VolName = "Vol-" + user1 + "-" + i;
-      user1vols[i] = user1VolName;
-      createVolumeArgs = new VolumeArgs(user1VolName, userArgs);
-      createVolumeArgs.setUserName(user1);
-      createVolumeArgs.setAdminName(adminUser);
-      createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB));
-      storageHandler.createVolume(createVolumeArgs);
-    }
-
-    // Test list all volumes - Removed Support for this operation for time
-    // being. TODO: we will need to bring this back if needed.
-    UserArgs userArgs0 = new UserArgs(user0, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    //listVolumeArgs = new ListArgs(userArgs0,"Vol-testListVolumes", 100, null);
-    // listVolumeArgs.setRootScan(true);
-    // volumes = storageHandler.listVolumes(listVolumeArgs);
-    // Assert.assertEquals(20, volumes.getVolumes().size());
-
-    // Test list all volumes belongs to an user
-    listVolumeArgs = new ListArgs(userArgs0, null, 100, null);
-    listVolumeArgs.setRootScan(false);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(10, volumes.getVolumes().size());
-
-    // Test prefix
-    listVolumeArgs = new ListArgs(userArgs0,
-        "Vol-" + user0 + "-3", 100, null);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(1, volumes.getVolumes().size());
-    Assert.assertEquals(user0vols[3],
-        volumes.getVolumes().get(0).getVolumeName());
-    Assert.assertEquals(user0,
-        volumes.getVolumes().get(0).getOwner().getName());
-
-    // Test list volumes by user
-    UserArgs userArgs1 = new UserArgs(user1, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    listVolumeArgs = new ListArgs(userArgs1, null, 100, null);
-    listVolumeArgs.setRootScan(false);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(0, volumes.getVolumes().size());
-
-    // Make sure all available fields are returned
-    final String user0vol4 = "Vol-" + user0 + "-4";
-    final String user0vol5 = "Vol-" + user0 + "-5";
-    listVolumeArgs = new ListArgs(userArgs0, null, 1, user0vol4);
-    listVolumeArgs.setRootScan(false);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(1, volumes.getVolumes().size());
-    Assert.assertEquals(user0,
-        volumes.getVolumes().get(0).getOwner().getName());
-    Assert.assertEquals(user0vol5,
-        volumes.getVolumes().get(0).getVolumeName());
-    Assert.assertEquals(5,
-        volumes.getVolumes().get(0).getQuota().getSize());
-    Assert.assertEquals(OzoneQuota.Units.GB,
-        volumes.getVolumes().get(0).getQuota().getUnit());
-
-    // User doesn't have volumes
-    UserArgs userArgsX = new UserArgs("unknwonUser", OzoneUtils.getRequestID(),
-        null, null, null, null);
-    listVolumeArgs = new ListArgs(userArgsX, null, 100, null);
-    listVolumeArgs.setRootScan(false);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(0, volumes.getVolumes().size());
-  }
-
-  /**
-   * Test get key information.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testGetKeyInfo() throws IOException,
-      OzoneException, ParseException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    long currentTime = Time.now();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String keyName = "testKey";
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    keyArgs.setSize(4096);
-
-
-    OutputStream stream = storageHandler.newKeyWriter(keyArgs);
-    stream.close();
-
-    KeyInfo keyInfo = storageHandler.getKeyInfo(keyArgs);
-    // Compare the time in second unit since the date string reparsed to
-    // millisecond will lose precision.
-    Assert.assertTrue(
-        (HddsClientUtils.formatDateTime(keyInfo.getCreatedOn()) / 1000) >= (
-            currentTime / 1000));
-    Assert.assertTrue(
-        (HddsClientUtils.formatDateTime(keyInfo.getModifiedOn()) / 1000) >= (
-            currentTime / 1000));
-    Assert.assertEquals(keyName, keyInfo.getKeyName());
-    // with out data written, the size would be 0
-    Assert.assertEquals(0, keyInfo.getSize());
-  }
-
-  /**
-   * Test that the write can proceed without having to set the right size.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testWriteSize() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString = RandomStringUtils.randomAscii(100);
-    // write a key without specifying size at all
-    String keyName = "testKey";
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    byte[] data = new byte[dataString.length()];
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
-    Assert.assertEquals(dataString, DFSUtil.bytes2String(data));
-
-    // write a key with a size, but write above it.
-    String keyName1 = "testKey1";
-    KeyArgs keyArgs1 = new KeyArgs(keyName1, bucketArgs);
-    keyArgs1.setSize(30);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs1)) {
-      stream.write(dataString.getBytes());
-    }
-    byte[] data1 = new byte[dataString.length()];
-    try (InputStream in = storageHandler.newKeyReader(keyArgs1)) {
-      in.read(data1);
-    }
-    Assert.assertEquals(dataString, DFSUtil.bytes2String(data1));
-  }
-
-  /**
-   * Tests the RPC call for getting scmId and clusterId from SCM.
-   * @throws IOException
-   */
-  @Test
-  public void testGetScmInfo() throws IOException {
-    ScmInfo info = cluster.getOzoneManager().getScmInfo();
-    Assert.assertEquals(clusterId, info.getClusterId());
-    Assert.assertEquals(scmId, info.getScmId());
-  }
-
-
-  //Disabling this test
-  @Ignore("Disabling this test until Open Key is fixed.")
-  public void testExpiredOpenKey() throws Exception {
-//    BackgroundService openKeyCleanUpService = ((BlockManagerImpl)cluster
-//        .getOzoneManager().getBlockManager()).getOpenKeyCleanupService();
-
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    // open some keys.
-
-    KeyArgs keyArgs1 = new KeyArgs("testKey1", bucketArgs);
-    KeyArgs keyArgs2 = new KeyArgs("testKey2", bucketArgs);
-    KeyArgs keyArgs3 = new KeyArgs("testKey3", bucketArgs);
-    KeyArgs keyArgs4 = new KeyArgs("testKey4", bucketArgs);
-    List<BlockGroup> openKeys;
-    storageHandler.newKeyWriter(keyArgs1);
-    storageHandler.newKeyWriter(keyArgs2);
-    storageHandler.newKeyWriter(keyArgs3);
-    storageHandler.newKeyWriter(keyArgs4);
-
-    Set<String> expected = Stream.of(
-        "testKey1", "testKey2", "testKey3", "testKey4")
-        .collect(Collectors.toSet());
-
-    // Now all k1-k4 should be in open state, so ExpiredOpenKeys should not
-    // contain these values.
-    openKeys = cluster.getOzoneManager()
-        .getMetadataManager().getExpiredOpenKeys();
-
-    for (BlockGroup bg : openKeys) {
-      String[] subs = bg.getGroupID().split("/");
-      String keyName = subs[subs.length - 1];
-      Assert.assertFalse(expected.contains(keyName));
-    }
-
-    Thread.sleep(2000);
-    // Now all k1-k4 should be in ExpiredOpenKeys
-    openKeys = cluster.getOzoneManager()
-        .getMetadataManager().getExpiredOpenKeys();
-    for (BlockGroup bg : openKeys) {
-      String[] subs = bg.getGroupID().split("/");
-      String keyName = subs[subs.length - 1];
-      if (expected.contains(keyName)) {
-        expected.remove(keyName);
-      }
-    }
-    Assert.assertEquals(0, expected.size());
-
-    KeyArgs keyArgs5 = new KeyArgs("testKey5", bucketArgs);
-    storageHandler.newKeyWriter(keyArgs5);
-
-    //openKeyCleanUpService.triggerBackgroundTaskForTesting();
-    Thread.sleep(2000);
-    // now all k1-k4 should have been removed by the clean-up task, only k5
-    // should be present in ExpiredOpenKeys.
-    openKeys =
-        cluster.getOzoneManager().getMetadataManager().getExpiredOpenKeys();
-    System.out.println(openKeys);
-    boolean key5found = false;
-    Set<String> removed = Stream.of(
-        "testKey1", "testKey2", "testKey3", "testKey4")
-        .collect(Collectors.toSet());
-    for (BlockGroup bg : openKeys) {
-      String[] subs = bg.getGroupID().split("/");
-      String keyName = subs[subs.length - 1];
-      Assert.assertFalse(removed.contains(keyName));
-      if (keyName.equals("testKey5")) {
-        key5found = true;
-      }
-    }
-    Assert.assertTrue(key5found);
-  }
-
-  /**
-   * Tests the OM Initialization.
-   * @throws IOException
-   */
-  @Test
-  public void testOmInitialization() throws IOException {
-    // Read the version file info from OM version file
-    OMStorage omStorage = cluster.getOzoneManager().getOmStorage();
-    SCMStorageConfig scmStorageConfig = new SCMStorageConfig(conf);
-    // asserts whether cluster Id and SCM ID are properly set in SCM Version
-    // file.
-    Assert.assertEquals(clusterId, scmStorageConfig.getClusterID());
-    Assert.assertEquals(scmId, scmStorageConfig.getScmId());
-    // asserts whether OM Id is properly set in OM Version file.
-    Assert.assertEquals(omId, omStorage.getOmId());
-    // asserts whether the SCM info is correct in OM Version file.
-    Assert.assertEquals(clusterId, omStorage.getClusterID());
-    Assert.assertEquals(scmId, omStorage.getScmId());
-  }
-
-  /**
-   * Tests the OM Initialization Failure.
-   * @throws IOException
-   */
-  @Test
-  public void testOmInitializationFailure() throws Exception {
-    OzoneConfiguration config = new OzoneConfiguration();
-    final String path =
-        GenericTestUtils.getTempPath(UUID.randomUUID().toString());
-    Path metaDirPath = Paths.get(path, "om-meta");
-    config.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString());
-    config.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
-    config.set(OZONE_OM_ADDRESS_KEY, "127.0.0.1:0");
-    config.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
-    config.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
-        conf.get(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY));
-
-    OzoneTestUtils.expectOmException(ResultCodes.OM_NOT_INITIALIZED, () -> {
-      OzoneManager.createOm(config);
-    });
-
-    OzoneTestUtils
-        .expectOmException(ResultCodes.SCM_VERSION_MISMATCH_ERROR, () -> {
-          OMStorage omStore = new OMStorage(config);
-          omStore.setClusterId("testClusterId");
-          omStore.setScmId("testScmId");
-          // writes the version file properties
-          omStore.initialize();
-          OzoneManager.createOm(config);
-        });
-  }
-
-  @Test
-  public void testGetServiceList() throws IOException {
-    long numGetServiceListCalls = omMetrics.getNumGetServiceLists();
-    List<ServiceInfo> services = cluster.getOzoneManager().getServiceList();
-
-    Assert.assertEquals(numGetServiceListCalls + 1,
-        omMetrics.getNumGetServiceLists());
-
-    ServiceInfo omInfo = services.stream().filter(
-        a -> a.getNodeType().equals(HddsProtos.NodeType.OM))
-        .collect(Collectors.toList()).get(0);
-    InetSocketAddress omAddress = new InetSocketAddress(omInfo.getHostname(),
-        omInfo.getPort(ServicePort.Type.RPC));
-    Assert.assertEquals(NetUtils.createSocketAddr(
-        conf.get(OZONE_OM_ADDRESS_KEY)), omAddress);
-
-    ServiceInfo scmInfo = services.stream().filter(
-        a -> a.getNodeType().equals(HddsProtos.NodeType.SCM))
-        .collect(Collectors.toList()).get(0);
-    InetSocketAddress scmAddress = new InetSocketAddress(scmInfo.getHostname(),
-        scmInfo.getPort(ServicePort.Type.RPC));
-    Assert.assertEquals(NetUtils.createSocketAddr(
-        conf.get(OZONE_SCM_CLIENT_ADDRESS_KEY)), scmAddress);
-  }
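getServiceList() returns one ServiceInfo entry per component, and the test resolves socket addresses from the RPC ServicePort. The same lookup as a small sketch (stream/filter as in the test; the error handling is illustrative):

    // Sketch: pick the OM entry out of the service list and build its
    // RPC endpoint.
    ServiceInfo om = services.stream()
        .filter(s -> s.getNodeType().equals(HddsProtos.NodeType.OM))
        .findFirst()
        .orElseThrow(IllegalStateException::new);
    InetSocketAddress omRpc = new InetSocketAddress(
        om.getHostname(), om.getPort(ServicePort.Type.RPC));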
-  @Test
-  public void testVersion() {
-    String expectedVersion = OzoneVersionInfo.OZONE_VERSION_INFO.getVersion();
-    String actualVersion = cluster.getOzoneManager().getSoftwareVersion();
-    Assert.assertEquals(expectedVersion, actualVersion);
-  }
-
-  /**
-   * Test if OM RocksDB keyMayExist API works for keys that are present.
-   * Test added in this module since we need access to custom codec dependent
-   * objects like OMKeyInfo.
-   * @throws Exception if OM or RocksDB operations fail.
-   */
-  @Test
-  public void testDBKeyMayExist() throws Exception {
-    RDBStore rdbStore = (RDBStore) cluster.getOzoneManager()
-        .getMetadataManager().getStore();
-    RocksDB db = rdbStore.getDb();
-
-    OmKeyInfo keyInfo = getNewOmKeyInfo();
-    OmKeyInfoCodec omKeyInfoCodec = new OmKeyInfoCodec();
-
-    db.put(StringUtils.getBytesUtf16("OMKey1"),
-        omKeyInfoCodec.toPersistedFormat(keyInfo));
-
-    StringBuilder sb = new StringBuilder();
-    Assert.assertTrue(db.keyMayExist(StringUtils.getBytesUtf16("OMKey1"),
-        sb));
-    Assert.assertTrue(sb.length() > 0);
-  }
-
-
-  @Test
-  public void testGetOMDBUpdates() throws IOException {
-
-    DBUpdatesRequest dbUpdatesRequest =
-        DBUpdatesRequest.newBuilder().setSequenceNumber(0).build();
-
-    DBUpdatesWrapper dbUpdates =
-        cluster.getOzoneManager().getDBUpdates(dbUpdatesRequest);
-    Assert.assertTrue(dbUpdates.getData().isEmpty());
-
-    //Write data to OM.
-    OmKeyInfo keyInfo = getNewOmKeyInfo();
-    Assert.assertNotNull(keyInfo);
-    dbUpdates =
-        cluster.getOzoneManager().getDBUpdates(dbUpdatesRequest);
-    Assert.assertFalse(dbUpdates.getData().isEmpty());
-
-  }
-
-  private OmKeyInfo getNewOmKeyInfo() throws IOException {
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder()
-        .setVolume("vol1")
-        .setAdminName("bilbo")
-        .setOwnerName("bilbo")
-        .build();
-    cluster.getOzoneManager().createVolume(volumeArgs);
-
-    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName("vol1")
-        .setBucketName("bucket1")
-        .build();
-    cluster.getOzoneManager().createBucket(bucketInfo);
-
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setBucketName("bucket1")
-        .setFactor(HddsProtos.ReplicationFactor.ONE)
-        .setDataSize(0)
-        .setType(HddsProtos.ReplicationType.STAND_ALONE)
-        .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(),
-            ALL, ALL))
-        .setVolumeName("vol1")
-        .setKeyName(UUID.randomUUID().toString())
-        .setDataSize(16 * 1024 * 1024 * 10)
-        .build();
-    OpenKeySession keySession = cluster.getOzoneManager().getKeyManager()
-        .openKey(keyArgs);
-    return keySession.getKeyInfo();
-  }
-}
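testDBKeyMayExist above depends on RocksDB's keyMayExist contract: false means the key is definitely absent, true is only a hint (bloom filter/memtable), and the StringBuilder is filled only when the value happens to be available in memory. A standalone sketch of that contract, assuming the keyMayExist(byte[], StringBuilder) overload of the RocksDB Java API of this era (the DB path is a placeholder):

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class KeyMayExistSketch {
      public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options opts = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(opts, "/tmp/keymayexist-demo")) {
          db.put("OMKey1".getBytes(), "value".getBytes());
          StringBuilder hint = new StringBuilder();
          if (db.keyMayExist("OMKey1".getBytes(), hint)) {
            // Possibly present: confirm with a real get().
            System.out.println(db.get("OMKey1".getBytes()) != null);
          } else {
            System.out.println("definitely absent");
          }
        }
      }
    }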
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
index 76841ddcf68..443f3059fd1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
@@ -18,7 +18,10 @@ package org.apache.hadoop.ozone.om;
-import org.apache.commons.lang3.RandomStringUtils;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.UUID;
+
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -29,25 +32,20 @@ import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.UUID;
+import org.apache.commons.lang3.RandomStringUtils;
 
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
+import org.junit.After;
+import org.junit.Assert;
 import static org.junit.Assert.fail;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
 
 /**
  * Test some client operations after cluster starts. And perform restart and
@@ -55,7 +53,6 @@
  */
 public class TestOzoneManagerRestart {
   private MiniOzoneCluster cluster = null;
-  private UserArgs userArgs;
   private OzoneConfiguration conf;
   private String clusterId;
   private String scmId;
@@ -86,8 +83,7 @@ public void init() throws Exception {
         .setOmId(omId)
         .build();
     cluster.waitForClusterToBeReady();
-    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
+
   }
 
   /**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java
index e2942a4f7e9..65bc2751423 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java
@@ -17,13 +17,10 @@
  */
 package org.apache.hadoop.ozone.ozShell;
 
-import com.google.common.base.Strings;
-
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.PrintStream;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.List;
 
 import org.apache.hadoop.fs.FileUtil;
@@ -31,19 +28,19 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.rest.RestClient;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.apache.hadoop.test.GenericTestUtils;
+
+import com.google.common.base.Strings;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
+import static org.junit.Assert.fail;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import picocli.CommandLine;
@@ -53,13 +50,9 @@ import picocli.CommandLine.ParseResult;
 import picocli.CommandLine.RunLast;
 
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
-import static org.junit.Assert.fail;
-
 /**
  * This test class specified for testing Ozone datanode shell command.
  */
-@RunWith(value = Parameterized.class)
 public class TestOzoneDatanodeShell {
 
   private static final Logger LOG =
@@ -81,17 +74,6 @@ public class TestOzoneDatanodeShell {
   private static final PrintStream OLD_OUT = System.out;
   private static final PrintStream OLD_ERR = System.err;
 
-  @Parameterized.Parameters
-  public static Collection<Object[]> clientProtocol() {
-    Object[][] params = new Object[][]{
-        {RpcClient.class},
-        {RestClient.class}};
-    return Arrays.asList(params);
-  }
-
-  @Parameterized.Parameter
-  @SuppressWarnings("visibilitymodifier")
-  public Class clientProtocol;
   /**
    * Create a MiniDFSCluster for testing with using distributed Ozone
    * handler type.
@@ -103,7 +85,7 @@ public static void init() throws Exception {
     conf = new OzoneConfiguration();
 
     String path = GenericTestUtils.getTempPath(
-        TestOzoneShell.class.getSimpleName());
+        TestOzoneDatanodeShell.class.getSimpleName());
     baseDir = new File(path);
     baseDir.mkdirs();
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
deleted file mode 100644
index bdcb307196f..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ /dev/null
@@ -1,1266 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ozShell;
-
-import java.io.ByteArrayOutputStream;
-import java.io.EOFException;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Objects;
-import java.util.Random;
-import java.util.UUID;
-
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.cli.MissingSubcommandException;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.tracing.StringCodec;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort;
-import org.apache.hadoop.ozone.web.ozShell.OzoneShell;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
-import org.apache.hadoop.ozone.web.response.BucketInfo;
-import org.apache.hadoop.ozone.web.response.KeyInfo;
-import org.apache.hadoop.ozone.web.response.VolumeInfo;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import com.google.common.base.Strings;
-import org.apache.commons.lang3.RandomStringUtils;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.slf4j.event.Level.TRACE;
-
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine;
-import picocli.CommandLine.ExecutionException;
-import picocli.CommandLine.IExceptionHandler2;
-import picocli.CommandLine.ParameterException;
picocli.CommandLine.ParseResult; -import picocli.CommandLine.RunLast; - -/** - * This test class specified for testing Ozone shell command. - */ -public class TestOzoneShell { - - private static final Logger LOG = - LoggerFactory.getLogger(TestOzoneShell.class); - - /** - * Set the timeout for every test. - */ - @Rule - public Timeout testTimeout = new Timeout(300000); - - private static String url; - private static File baseDir; - private static OzoneConfiguration conf = null; - private static MiniOzoneCluster cluster = null; - private static ClientProtocol client = null; - private static Shell shell = null; - - private final ByteArrayOutputStream out = new ByteArrayOutputStream(); - private final ByteArrayOutputStream err = new ByteArrayOutputStream(); - private static final PrintStream OLD_OUT = System.out; - private static final PrintStream OLD_ERR = System.err; - - /** - * Create a MiniDFSCluster for testing with using distributed Ozone - * handler type. - * - * @throws Exception - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - - String path = GenericTestUtils.getTempPath( - TestOzoneShell.class.getSimpleName()); - baseDir = new File(path); - baseDir.mkdirs(); - - shell = new OzoneShell(); - - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .build(); - conf.setInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue()); - conf.setQuietMode(false); - client = new RpcClient(conf); - cluster.waitForClusterToBeReady(); - } - - /** - * shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - - if (baseDir != null) { - FileUtil.fullyDelete(baseDir, true); - } - } - - @Before - public void setup() { - System.setOut(new PrintStream(out)); - System.setErr(new PrintStream(err)); - url = "o3://" + getOmAddress(); - } - - @After - public void reset() { - // reset stream after each unit test - out.reset(); - err.reset(); - - // restore system streams - System.setOut(OLD_OUT); - System.setErr(OLD_ERR); - } - - @Test - public void testCreateVolume() throws Exception { - LOG.info("Running testCreateVolume"); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - testCreateVolume(volumeName, ""); - volumeName = "volume" + RandomStringUtils.randomNumeric(5); - testCreateVolume("/////" + volumeName, ""); - testCreateVolume("/////", "Volume name is required"); - testCreateVolume("/////vol/123", - "Invalid volume name. 
Delimiters (/) not allowed in volume name"); - } - - private void testCreateVolume(String volumeName, String errorMsg) - throws Exception { - err.reset(); - String userName = "bilbo"; - String[] args = new String[] {"volume", "create", url + "/" + volumeName, - "--user", userName, "--root"}; - - if (Strings.isNullOrEmpty(errorMsg)) { - execute(shell, args); - - } else { - executeWithError(shell, args, errorMsg); - return; - } - - String truncatedVolumeName = - volumeName.substring(volumeName.lastIndexOf('/') + 1); - OzoneVolume volumeInfo = client.getVolumeDetails(truncatedVolumeName); - assertEquals(truncatedVolumeName, volumeInfo.getName()); - assertEquals(userName, volumeInfo.getOwner()); - } - - private void execute(Shell ozoneShell, String[] args) { - LOG.info("Executing shell command with args {}", Arrays.asList(args)); - CommandLine cmd = ozoneShell.getCmd(); - - IExceptionHandler2> exceptionHandler = - new IExceptionHandler2>() { - @Override - public List handleParseException(ParameterException ex, - String[] args) { - throw ex; - } - - @Override - public List handleExecutionException(ExecutionException ex, - ParseResult parseResult) { - throw ex; - } - }; - cmd.parseWithHandlers(new RunLast(), - exceptionHandler, args); - } - - /** - * Test to create volume without specifying --user or -u. - * @throws Exception - */ - @Test - public void testCreateVolumeWithoutUser() throws Exception { - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String[] args = new String[] {"volume", "create", url + "/" + volumeName, - "--root"}; - - execute(shell, args); - - String truncatedVolumeName = - volumeName.substring(volumeName.lastIndexOf('/') + 1); - OzoneVolume volumeInfo = client.getVolumeDetails(truncatedVolumeName); - assertEquals(truncatedVolumeName, volumeInfo.getName()); - assertEquals(UserGroupInformation.getCurrentUser().getUserName(), - volumeInfo.getOwner()); - } - - @Test - public void testDeleteVolume() throws Exception { - LOG.info("Running testDeleteVolume"); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .setOwner("bilbo") - .setQuota("100TB") - .build(); - client.createVolume(volumeName, volumeArgs); - OzoneVolume volume = client.getVolumeDetails(volumeName); - assertNotNull(volume); - - String[] args = new String[] {"volume", "delete", url + "/" + volumeName}; - execute(shell, args); - String output = out.toString(); - assertTrue(output.contains("Volume " + volumeName + " is deleted")); - - // verify if volume has been deleted - try { - client.getVolumeDetails(volumeName); - fail("Get volume call should have thrown."); - } catch (OMException e) { - Assert.assertEquals(VOLUME_NOT_FOUND, e.getResult()); - } - - - volumeName = "volume" + RandomStringUtils.randomNumeric(5); - volumeArgs = VolumeArgs.newBuilder() - .setOwner("bilbo") - .setQuota("100TB") - .build(); - client.createVolume(volumeName, volumeArgs); - volume = client.getVolumeDetails(volumeName); - assertNotNull(volume); - - //volumeName prefixed with / - String volumeNameWithSlashPrefix = "/" + volumeName; - args = new String[] {"volume", "delete", - url + "/" + volumeNameWithSlashPrefix}; - execute(shell, args); - output = out.toString(); - assertTrue(output.contains("Volume " + volumeName + " is deleted")); - - // verify if volume has been deleted - try { - client.getVolumeDetails(volumeName); - fail("Get volume call should have thrown."); - } catch (OMException e) { - Assert.assertEquals(VOLUME_NOT_FOUND, e.getResult()); - 
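
The execute() helper above relies on picocli's parseWithHandlers with an IExceptionHandler2 that rethrows instead of printing usage, which is what lets these tests assert on the underlying OMException. Distilled into a standalone form, as a sketch against the picocli 3.x API used here (RethrowingRunner is an illustrative name):

import java.util.List;
import picocli.CommandLine;
import picocli.CommandLine.ExecutionException;
import picocli.CommandLine.IExceptionHandler2;
import picocli.CommandLine.ParameterException;
import picocli.CommandLine.ParseResult;
import picocli.CommandLine.RunLast;

/** Runs a picocli command and rethrows failures so JUnit can assert on them. */
final class RethrowingRunner {
  private RethrowingRunner() {
  }

  static void run(CommandLine cmd, String... args) {
    IExceptionHandler2<List<Object>> handler =
        new IExceptionHandler2<List<Object>>() {
          @Override
          public List<Object> handleParseException(ParameterException ex,
              String[] ignored) {
            throw ex;  // bad arguments surface instead of printing usage
          }

          @Override
          public List<Object> handleExecutionException(ExecutionException ex,
              ParseResult parseResult) {
            throw ex;  // command failures surface to the test
          }
        };
    cmd.parseWithHandlers(new RunLast(), handler, args);
  }
}
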
} - } - - @Test - public void testInfoVolume() throws Exception { - LOG.info("Running testInfoVolume"); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .setOwner("bilbo") - .setQuota("100TB") - .build(); - client.createVolume(volumeName, volumeArgs); - - //volumeName supplied as-is - String[] args = new String[] {"volume", "info", url + "/" + volumeName}; - execute(shell, args); - - String output = out.toString(); - assertTrue(output.contains(volumeName)); - assertTrue(output.contains("createdOn") - && output.contains(OzoneConsts.OZONE_TIME_ZONE)); - - //volumeName prefixed with / - String volumeNameWithSlashPrefix = "/" + volumeName; - args = new String[] {"volume", "info", - url + "/" + volumeNameWithSlashPrefix}; - execute(shell, args); - - output = out.toString(); - assertTrue(output.contains(volumeName)); - assertTrue(output.contains("createdOn") - && output.contains(OzoneConsts.OZONE_TIME_ZONE)); - - // test infoVolume with invalid volume name - args = new String[] {"volume", "info", - url + "/" + volumeName + "/invalid-name"}; - executeWithError(shell, args, "Invalid volume name. " + - "Delimiters (/) not allowed in volume name"); - - // get info for non-exist volume - args = new String[] {"volume", "info", url + "/invalid-volume"}; - executeWithError(shell, args, VOLUME_NOT_FOUND); - } - - @Test - public void testShellIncompleteCommand() throws Exception { - LOG.info("Running testShellIncompleteCommand"); - String expectedError = "Incomplete command"; - String[] args = new String[] {}; //executing 'ozone sh' - - executeWithError(shell, args, expectedError); - - args = new String[] {"volume"}; //executing 'ozone sh volume' - executeWithError(shell, args, MissingSubcommandException.class, - expectedError); - - args = new String[] {"bucket"}; //executing 'ozone sh bucket' - executeWithError(shell, args, MissingSubcommandException.class, - expectedError); - - args = new String[] {"key"}; //executing 'ozone sh key' - executeWithError(shell, args, MissingSubcommandException.class, - expectedError); - } - - @Test - public void testUpdateVolume() throws Exception { - LOG.info("Running testUpdateVolume"); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String userName = "bilbo"; - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .setOwner("bilbo") - .setQuota("100TB") - .build(); - client.createVolume(volumeName, volumeArgs); - OzoneVolume vol = client.getVolumeDetails(volumeName); - assertEquals(userName, vol.getOwner()); - assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(), vol.getQuota()); - - String[] args = new String[] {"volume", "update", url + "/" + volumeName, - "--quota", "500MB"}; - execute(shell, args); - vol = client.getVolumeDetails(volumeName); - assertEquals(userName, vol.getOwner()); - assertEquals(OzoneQuota.parseQuota("500MB").sizeInBytes(), vol.getQuota()); - - String newUser = "new-user"; - args = new String[] {"volume", "update", url + "/" + volumeName, - "--user", newUser}; - execute(shell, args); - vol = client.getVolumeDetails(volumeName); - assertEquals(newUser, vol.getOwner()); - - //volume with / prefix - String volumeWithPrefix = "/" + volumeName; - String newUser2 = "new-user2"; - args = new String[] {"volume", "update", url + "/" + volumeWithPrefix, - "--user", newUser2}; - execute(shell, args); - vol = client.getVolumeDetails(volumeName); - assertEquals(newUser2, vol.getOwner()); - - // test error conditions - args = new String[] {"volume", "update", url + 
"/invalid-volume", - "--user", newUser}; - executeWithError(shell, args, ResultCodes.VOLUME_NOT_FOUND); - - err.reset(); - args = new String[] {"volume", "update", url + "/invalid-volume", - "--quota", "500MB"}; - executeWithError(shell, args, ResultCodes.VOLUME_NOT_FOUND); - } - - /** - * Execute command, assert exception message and returns true if error - * was thrown. - */ - private void executeWithError(Shell ozoneShell, String[] args, - OMException.ResultCodes code) { - - try { - execute(ozoneShell, args); - fail("Exception is expected from command execution " + Arrays - .asList(args)); - } catch (Exception ex) { - Assert.assertEquals(OMException.class, ex.getCause().getClass()); - Assert.assertEquals(code, ((OMException) ex.getCause()).getResult()); - } - - } - - /** - * Execute command, assert exception message and returns true if error - * was thrown. - */ - private void executeWithError(Shell ozoneShell, String[] args, - String expectedError) { - if (Strings.isNullOrEmpty(expectedError)) { - execute(ozoneShell, args); - } else { - try { - execute(ozoneShell, args); - fail("Exception is expected from command execution " + Arrays - .asList(args)); - } catch (Exception ex) { - if (!Strings.isNullOrEmpty(expectedError)) { - Throwable exceptionToCheck = ex; - if (exceptionToCheck.getCause() != null) { - exceptionToCheck = exceptionToCheck.getCause(); - } - Assert.assertTrue( - String.format( - "Error of shell code doesn't contain the " + - "exception [%s] in [%s]", - expectedError, exceptionToCheck.getMessage()), - exceptionToCheck.getMessage().contains(expectedError)); - } - } - } - } - - /** - * Execute command, assert exception message and exception class - * and returns true if error was thrown. - */ - private void executeWithError(Shell ozoneShell, String[] args, - Class expectedException, String expectedError) { - if (Strings.isNullOrEmpty(expectedError)) { - execute(ozoneShell, args); - } else { - try { - execute(ozoneShell, args); - fail("Exception is expected from command execution " + Arrays - .asList(args)); - } catch (Exception ex) { - if (!Strings.isNullOrEmpty(expectedError)) { - Throwable exceptionToCheck = ex; - if (exceptionToCheck.getCause() != null) { - exceptionToCheck = exceptionToCheck.getCause(); - } - Assert.assertTrue( - String.format( - "Error of shell code doesn't contain the " + - "expectedException [%s] in [%s]", - expectedError, exceptionToCheck.getMessage()), - exceptionToCheck.getMessage().contains(expectedError)); - assertTrue(ex.getClass().getCanonicalName() - .equals(expectedException.getCanonicalName())); - } - } - } - } - - /** - * Execute command, assert exception cause message and returns true if error - * was thrown. - */ - private void executeWithError(Shell ozoneShell, String[] args, - Class expectedCause) { - if (Objects.isNull(expectedCause)) { - execute(ozoneShell, args); - } else { - try { - execute(ozoneShell, args); - fail("Exception is expected from command execution " + Arrays - .asList(args)); - } catch (Exception ex) { - LOG.error("Exception: ", ex); - assertTrue(ex.getCause().getClass().getCanonicalName() - .equals(expectedCause.getCanonicalName())); - } - } - } - - @Test - public void testListVolume() throws Exception { - LOG.info("Running testListVolume"); - String protocol = "rpcclient"; - String commandOutput, commandError; - List volumes; - final int volCount = 20; - final String user1 = "test-user-a-" + protocol; - final String user2 = "test-user-b-" + protocol; - - // Create 20 volumes, 10 for user1 and another 10 for user2. 
- for (int x = 0; x < volCount; x++) { - String volumeName; - String userName; - - if (x % 2 == 0) { - // create volume [test-vol0, test-vol2, ..., test-vol18] for user1 - userName = user1; - volumeName = "test-vol-" + protocol + x; - } else { - // create volume [test-vol1, test-vol3, ..., test-vol19] for user2 - userName = user2; - volumeName = "test-vol-" + protocol + x; - } - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .setOwner(userName) - .setQuota("100TB") - .build(); - client.createVolume(volumeName, volumeArgs); - OzoneVolume vol = client.getVolumeDetails(volumeName); - assertNotNull(vol); - } - - String[] args = new String[] {"volume", "list", url + "/abcde", "--user", - user1, "--length", "100"}; - executeWithError(shell, args, "Invalid URI"); - - err.reset(); - // test -length option - args = new String[] {"volume", "list", url + "/", "--user", - user1, "--length", "100"}; - execute(shell, args); - commandOutput = out.toString(); - volumes = (List) JsonUtils - .toJsonList(commandOutput, VolumeInfo.class); - - assertEquals(10, volumes.size()); - for (VolumeInfo volume : volumes) { - assertEquals(volume.getOwner().getName(), user1); - assertTrue(volume.getCreatedOn().contains(OzoneConsts.OZONE_TIME_ZONE)); - } - - out.reset(); - args = new String[] {"volume", "list", url + "/", "--user", - user1, "--length", "2"}; - execute(shell, args); - commandOutput = out.toString(); - volumes = (List) JsonUtils - .toJsonList(commandOutput, VolumeInfo.class); - - assertEquals(2, volumes.size()); - - // test --prefix option - out.reset(); - args = - new String[] {"volume", "list", url + "/", "--user", user1, "--length", - "100", "--prefix", "test-vol-" + protocol + "1"}; - execute(shell, args); - commandOutput = out.toString(); - volumes = (List) JsonUtils - .toJsonList(commandOutput, VolumeInfo.class); - - assertEquals(5, volumes.size()); - // return volume names should be [test-vol10, test-vol12, ..., test-vol18] - for (int i = 0; i < volumes.size(); i++) { - assertEquals(volumes.get(i).getVolumeName(), - "test-vol-" + protocol + ((i + 5) * 2)); - assertEquals(volumes.get(i).getOwner().getName(), user1); - } - - // test -start option - out.reset(); - args = - new String[] {"volume", "list", url + "/", "--user", user2, "--length", - "100", "--start", "test-vol-" + protocol + "15"}; - execute(shell, args); - commandOutput = out.toString(); - volumes = (List) JsonUtils - .toJsonList(commandOutput, VolumeInfo.class); - - assertEquals(2, volumes.size()); - - assertEquals(volumes.get(0).getVolumeName(), "test-vol-" + protocol + "17"); - assertEquals(volumes.get(1).getVolumeName(), "test-vol-" + protocol + "19"); - assertEquals(volumes.get(0).getOwner().getName(), user2); - assertEquals(volumes.get(1).getOwner().getName(), user2); - - // test error conditions - err.reset(); - args = new String[] {"volume", "list", url + "/", "--user", - user2, "--length", "-1"}; - executeWithError(shell, args, "the length should be a positive number"); - - err.reset(); - args = new String[] {"volume", "list", url + "/", "--user", - user2, "--length", "invalid-length"}; - executeWithError(shell, args, "Invalid value for option " + - "'--length': 'invalid-length' is not an int"); - } - - @Test - public void testCreateBucket() throws Exception { - LOG.info("Running testCreateBucket"); - OzoneVolume vol = creatVolume(); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - String[] args = new String[] {"bucket", "create", - url + "/" + vol.getName() + "/" + bucketName}; - - execute(shell, args); - 
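
testListVolume parses the shell's JSON output back into VolumeInfo beans through JsonUtils.toJsonList. That helper is essentially a typed Jackson list read; a self-contained equivalent, assuming Jackson databind on the classpath (parseJsonList is an illustrative name):

import java.io.IOException;
import java.util.List;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.type.CollectionType;

/** Reads a JSON array printed by the shell into a typed list. */
static <T> List<T> parseJsonList(String json, Class<T> elementType)
    throws IOException {
  ObjectMapper mapper = new ObjectMapper();
  CollectionType listType = mapper.getTypeFactory()
      .constructCollectionType(List.class, elementType);
  return mapper.readValue(json, listType);
}

A call such as parseJsonList(commandOutput, VolumeInfo.class) would also avoid the unchecked casts the tests perform on JsonUtils results.
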
OzoneBucket bucketInfo = vol.getBucket(bucketName); - assertEquals(vol.getName(), - bucketInfo.getVolumeName()); - assertEquals(bucketName, bucketInfo.getName()); - - // test create a bucket in a non-exist volume - args = new String[] {"bucket", "create", - url + "/invalid-volume/" + bucketName}; - executeWithError(shell, args, VOLUME_NOT_FOUND); - - // test createBucket with invalid bucket name - args = new String[] {"bucket", "create", - url + "/" + vol.getName() + "/" + bucketName + "/invalid-name"}; - executeWithError(shell, args, - "Invalid bucket name. Delimiters (/) not allowed in bucket name"); - } - - @Test - public void testDeleteBucket() throws Exception { - LOG.info("Running testDeleteBucket"); - OzoneVolume vol = creatVolume(); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - vol.createBucket(bucketName); - OzoneBucket bucketInfo = vol.getBucket(bucketName); - assertNotNull(bucketInfo); - - String[] args = new String[] {"bucket", "delete", - url + "/" + vol.getName() + "/" + bucketName}; - execute(shell, args); - - // verify if bucket has been deleted in volume - try { - vol.getBucket(bucketName); - fail("Get bucket should have thrown."); - } catch (OMException e) { - Assert.assertEquals(BUCKET_NOT_FOUND, e.getResult()); - } - - // test delete bucket in a non-exist volume - args = new String[] {"bucket", "delete", - url + "/invalid-volume" + "/" + bucketName}; - executeWithError(shell, args, VOLUME_NOT_FOUND); - - err.reset(); - // test delete non-exist bucket - args = new String[] {"bucket", "delete", - url + "/" + vol.getName() + "/invalid-bucket"}; - executeWithError(shell, args, BUCKET_NOT_FOUND); - } - - @Test - public void testInfoBucket() throws Exception { - LOG.info("Running testInfoBucket"); - OzoneVolume vol = creatVolume(); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - vol.createBucket(bucketName); - - String[] args = new String[] {"bucket", "info", - url + "/" + vol.getName() + "/" + bucketName}; - execute(shell, args); - - String output = out.toString(); - assertTrue(output.contains(bucketName)); - assertTrue(output.contains("createdOn") - && output.contains(OzoneConsts.OZONE_TIME_ZONE)); - - // test infoBucket with invalid bucket name - args = new String[] {"bucket", "info", - url + "/" + vol.getName() + "/" + bucketName + "/invalid-name"}; - executeWithError(shell, args, - "Invalid bucket name. Delimiters (/) not allowed in bucket name"); - - // test get info from a non-exist bucket - args = new String[] {"bucket", "info", - url + "/" + vol.getName() + "/invalid-bucket" + bucketName}; - executeWithError(shell, args, - ResultCodes.BUCKET_NOT_FOUND); - } - - @Test - public void testListBucket() throws Exception { - LOG.info("Running testListBucket"); - List buckets; - String commandOutput; - int bucketCount = 11; - OzoneVolume vol = creatVolume(); - - List bucketNames = new ArrayList<>(); - // create bucket from test-bucket0 to test-bucket10 - for (int i = 0; i < bucketCount; i++) { - String name = "test-bucket" + i; - bucketNames.add(name); - vol.createBucket(name); - OzoneBucket bucket = vol.getBucket(name); - assertNotNull(bucket); - } - - // test listBucket with invalid volume name - String[] args = new String[] {"bucket", "list", - url + "/" + vol.getName() + "/invalid-name"}; - executeWithError(shell, args, "Invalid volume name. 
" + - "Delimiters (/) not allowed in volume name"); - - // test -length option - args = new String[] {"bucket", "list", - url + "/" + vol.getName(), "--length", "100"}; - execute(shell, args); - commandOutput = out.toString(); - buckets = (List) JsonUtils.toJsonList(commandOutput, - BucketInfo.class); - - assertEquals(11, buckets.size()); - // sort bucket names since the return buckets isn't in created order - Collections.sort(bucketNames); - // return bucket names should be [test-bucket0, test-bucket1, - // test-bucket10, test-bucket2, ,..., test-bucket9] - for (int i = 0; i < buckets.size(); i++) { - assertEquals(buckets.get(i).getBucketName(), bucketNames.get(i)); - assertEquals(buckets.get(i).getVolumeName(), vol.getName()); - assertTrue(buckets.get(i).getCreatedOn() - .contains(OzoneConsts.OZONE_TIME_ZONE)); - } - - out.reset(); - args = new String[] {"bucket", "list", url + "/" + vol.getName(), - "--length", "3"}; - execute(shell, args); - commandOutput = out.toString(); - buckets = (List) JsonUtils.toJsonList(commandOutput, - BucketInfo.class); - - assertEquals(3, buckets.size()); - // return bucket names should be [test-bucket0, - // test-bucket1, test-bucket10] - assertEquals(buckets.get(0).getBucketName(), "test-bucket0"); - assertEquals(buckets.get(1).getBucketName(), "test-bucket1"); - assertEquals(buckets.get(2).getBucketName(), "test-bucket10"); - - // test --prefix option - out.reset(); - args = new String[] {"bucket", "list", url + "/" + vol.getName(), - "--length", "100", "--prefix", "test-bucket1"}; - execute(shell, args); - commandOutput = out.toString(); - buckets = (List) JsonUtils.toJsonList(commandOutput, - BucketInfo.class); - - assertEquals(2, buckets.size()); - // return bucket names should be [test-bucket1, test-bucket10] - assertEquals(buckets.get(0).getBucketName(), "test-bucket1"); - assertEquals(buckets.get(1).getBucketName(), "test-bucket10"); - - // test -start option - out.reset(); - args = new String[] {"bucket", "list", url + "/" + vol.getName(), - "--length", "100", "--start", "test-bucket7"}; - execute(shell, args); - commandOutput = out.toString(); - buckets = (List) JsonUtils.toJsonList(commandOutput, - BucketInfo.class); - - assertEquals(2, buckets.size()); - assertEquals(buckets.get(0).getBucketName(), "test-bucket8"); - assertEquals(buckets.get(1).getBucketName(), "test-bucket9"); - - // test error conditions - err.reset(); - args = new String[] {"bucket", "list", url + "/" + vol.getName(), - "--length", "-1"}; - executeWithError(shell, args, "the length should be a positive number"); - } - - @Test - public void testPutKey() throws Exception { - LOG.info("Running testPutKey"); - OzoneBucket bucket = creatBucket(); - String volumeName = bucket.getVolumeName(); - String bucketName = bucket.getName(); - String keyName = "key" + RandomStringUtils.randomNumeric(5); - - String[] args = new String[] {"key", "put", - url + "/" + volumeName + "/" + bucketName + "/" + keyName, - createTmpFile()}; - execute(shell, args); - - OzoneKey keyInfo = bucket.getKey(keyName); - assertEquals(keyName, keyInfo.getName()); - - // test put key in a non-exist bucket - args = new String[] {"key", "put", - url + "/" + volumeName + "/invalid-bucket/" + keyName, - createTmpFile()}; - executeWithError(shell, args, BUCKET_NOT_FOUND); - } - - @Test - public void testGetKey() throws Exception { - GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer - .captureLogs(StringCodec.LOG); - GenericTestUtils.setLogLevel(StringCodec.LOG, TRACE); - LOG.info("Running testGetKey"); 
- String keyName = "key" + RandomStringUtils.randomNumeric(5); - OzoneBucket bucket = creatBucket(); - String volumeName = bucket.getVolumeName(); - String bucketName = bucket.getName(); - - String dataStr = "test-data"; - OzoneOutputStream keyOutputStream = - bucket.createKey(keyName, dataStr.length()); - keyOutputStream.write(dataStr.getBytes()); - keyOutputStream.close(); - assertFalse("put key without malformed tracing", - logs.getOutput().contains("MalformedTracerStateString")); - logs.clearOutput(); - - String tmpPath = baseDir.getAbsolutePath() + "/testfile-" - + UUID.randomUUID().toString(); - String[] args = new String[] {"key", "get", - url + "/" + volumeName + "/" + bucketName + "/" + keyName, - tmpPath}; - execute(shell, args); - assertFalse("get key without malformed tracing", - logs.getOutput().contains("MalformedTracerStateString")); - logs.clearOutput(); - - byte[] dataBytes = new byte[dataStr.length()]; - try (FileInputStream randFile = new FileInputStream(new File(tmpPath))) { - randFile.read(dataBytes); - } - assertEquals(dataStr, DFSUtil.bytes2String(dataBytes)); - - tmpPath = baseDir.getAbsolutePath() + File.separatorChar + keyName; - args = new String[] {"key", "get", - url + "/" + volumeName + "/" + bucketName + "/" + keyName, - baseDir.getAbsolutePath()}; - execute(shell, args); - - dataBytes = new byte[dataStr.length()]; - try (FileInputStream randFile = new FileInputStream(new File(tmpPath))) { - randFile.read(dataBytes); - } - assertEquals(dataStr, DFSUtil.bytes2String(dataBytes)); - } - - @Test - public void testDeleteKey() throws Exception { - LOG.info("Running testDeleteKey"); - String keyName = "key" + RandomStringUtils.randomNumeric(5); - OzoneBucket bucket = creatBucket(); - String volumeName = bucket.getVolumeName(); - String bucketName = bucket.getName(); - String dataStr = "test-data"; - OzoneOutputStream keyOutputStream = - bucket.createKey(keyName, dataStr.length()); - keyOutputStream.write(dataStr.getBytes()); - keyOutputStream.close(); - - OzoneKey keyInfo = bucket.getKey(keyName); - assertEquals(keyName, keyInfo.getName()); - - String[] args = new String[] {"key", "delete", - url + "/" + volumeName + "/" + bucketName + "/" + keyName}; - execute(shell, args); - - // verify if key has been deleted in the bucket - assertKeyNotExists(bucket, keyName); - - // test delete key in a non-exist bucket - args = new String[] {"key", "delete", - url + "/" + volumeName + "/invalid-bucket/" + keyName}; - executeWithError(shell, args, BUCKET_NOT_FOUND); - - err.reset(); - // test delete a non-exist key in bucket - args = new String[] {"key", "delete", - url + "/" + volumeName + "/" + bucketName + "/invalid-key"}; - executeWithError(shell, args, KEY_NOT_FOUND); - } - - @Test - public void testRenameKey() throws Exception { - LOG.info("Running testRenameKey"); - OzoneBucket bucket = creatBucket(); - OzoneKey oldKey = createTestKey(bucket); - - String oldName = oldKey.getName(); - String newName = oldName + ".new"; - String[] args = new String[]{ - "key", "rename", - String.format("%s/%s/%s", - url, oldKey.getVolumeName(), oldKey.getBucketName()), - oldName, - newName - }; - execute(shell, args); - - OzoneKey newKey = bucket.getKey(newName); - assertEquals(oldKey.getCreationTime(), newKey.getCreationTime()); - assertEquals(oldKey.getDataSize(), newKey.getDataSize()); - assertKeyNotExists(bucket, oldName); - } - - @Test - public void testInfoKeyDetails() throws Exception { - LOG.info("Running testInfoKey"); - String keyName = "key" + RandomStringUtils.randomNumeric(5); 
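
Several of these tests repeat the same write-then-stat sequence: createKey with the payload length, write the bytes, close, then fetch the OzoneKey. Factored out, that sequence is roughly the following (a sketch; writeTestKey is a hypothetical helper):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

/** Writes a small key and returns its metadata, as the tests here do inline. */
static OzoneKey writeTestKey(OzoneBucket bucket, String keyName, String data)
    throws IOException {
  byte[] bytes = data.getBytes(StandardCharsets.UTF_8);
  try (OzoneOutputStream out = bucket.createKey(keyName, bytes.length)) {
    out.write(bytes);                 // close() commits the key to OM
  }
  return bucket.getKey(keyName);
}
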
- OzoneBucket bucket = creatBucket(); - String volumeName = bucket.getVolumeName(); - String bucketName = bucket.getName(); - String dataStr = "test-data"; - OzoneOutputStream keyOutputStream = - bucket.createKey(keyName, dataStr.length()); - keyOutputStream.write(dataStr.getBytes()); - keyOutputStream.close(); - - String[] args = new String[] {"key", "info", - url + "/" + volumeName + "/" + bucketName + "/" + keyName}; - - // verify the response output - execute(shell, args); - String output = out.toString(); - - assertTrue(output.contains(keyName)); - assertTrue( - output.contains("createdOn") && output.contains("modifiedOn") && output - .contains(OzoneConsts.OZONE_TIME_ZONE)); - assertTrue( - output.contains("containerID") && output.contains("localID") && output - .contains("length") && output.contains("offset")); - // reset stream - out.reset(); - err.reset(); - - // get the info of a non-exist key - args = new String[] {"key", "info", - url + "/" + volumeName + "/" + bucketName + "/invalid-key"}; - - // verify the response output - // get the non-exist key info should be failed - executeWithError(shell, args, KEY_NOT_FOUND); - } - - @Test - public void testInfoDirKey() throws Exception { - LOG.info("Running testInfoKey for Dir Key"); - String dirKeyName = "test/"; - String keyNameOnly = "test"; - OzoneBucket bucket = creatBucket(); - String volumeName = bucket.getVolumeName(); - String bucketName = bucket.getName(); - String dataStr = "test-data"; - OzoneOutputStream keyOutputStream = - bucket.createKey(dirKeyName, dataStr.length()); - keyOutputStream.write(dataStr.getBytes()); - keyOutputStream.close(); - String[] args = new String[] {"key", "info", - url + "/" + volumeName + "/" + bucketName + "/" + dirKeyName}; - // verify the response output - execute(shell, args); - String output = out.toString(); - assertTrue(output.contains(dirKeyName)); - assertTrue(output.contains("createdOn") && - output.contains("modifiedOn") && - output.contains(OzoneConsts.OZONE_TIME_ZONE)); - args = new String[] {"key", "info", - url + "/" + volumeName + "/" + bucketName + "/" + keyNameOnly}; - executeWithError(shell, args, KEY_NOT_FOUND); - out.reset(); - err.reset(); - } - - @Test - public void testListKey() throws Exception { - LOG.info("Running testListKey"); - String commandOutput; - List keys; - int keyCount = 11; - OzoneBucket bucket = creatBucket(); - String volumeName = bucket.getVolumeName(); - String bucketName = bucket.getName(); - - String keyName; - List keyNames = new ArrayList<>(); - for (int i = 0; i < keyCount; i++) { - keyName = "test-key" + i; - keyNames.add(keyName); - String dataStr = "test-data"; - OzoneOutputStream keyOutputStream = - bucket.createKey(keyName, dataStr.length()); - keyOutputStream.write(dataStr.getBytes()); - keyOutputStream.close(); - } - - // test listKey with invalid bucket name - String[] args = new String[] {"key", "list", - url + "/" + volumeName + "/" + bucketName + "/invalid-name"}; - executeWithError(shell, args, "Invalid bucket name. 
" + - "Delimiters (/) not allowed in bucket name"); - - // test -length option - args = new String[] {"key", "list", - url + "/" + volumeName + "/" + bucketName, "--length", "100"}; - execute(shell, args); - commandOutput = out.toString(); - keys = (List) JsonUtils.toJsonList(commandOutput, - KeyInfo.class); - - assertEquals(11, keys.size()); - // sort key names since the return keys isn't in created order - Collections.sort(keyNames); - // return key names should be [test-key0, test-key1, - // test-key10, test-key2, ,..., test-key9] - for (int i = 0; i < keys.size(); i++) { - assertEquals(keys.get(i).getKeyName(), keyNames.get(i)); - // verify the creation/modification time of key - assertTrue(keys.get(i).getCreatedOn() - .contains(OzoneConsts.OZONE_TIME_ZONE)); - assertTrue(keys.get(i).getModifiedOn() - .contains(OzoneConsts.OZONE_TIME_ZONE)); - } - - out.reset(); - String msgText = "Listing first 3 entries of the result. " + - "Use --length (-l) to override max returned keys."; - args = - new String[] {"key", "list", url + "/" + volumeName + "/" + bucketName, - "--length", "3"}; - execute(shell, args); - commandOutput = out.toString(); - assertTrue("Expecting output to start with " + msgText, - commandOutput.contains(msgText)); - commandOutput = commandOutput.replace(msgText, ""); - keys = (List) JsonUtils.toJsonList(commandOutput, - KeyInfo.class); - - assertEquals(3, keys.size()); - // return key names should be [test-key0, test-key1, test-key10] - assertEquals(keys.get(0).getKeyName(), "test-key0"); - assertEquals(keys.get(1).getKeyName(), "test-key1"); - assertEquals(keys.get(2).getKeyName(), "test-key10"); - - // test --prefix option - out.reset(); - args = - new String[] {"key", "list", url + "/" + volumeName + "/" + bucketName, - "--length", "100", "--prefix", "test-key1"}; - execute(shell, args); - commandOutput = out.toString(); - keys = (List) JsonUtils.toJsonList(commandOutput, - KeyInfo.class); - - assertEquals(2, keys.size()); - // return key names should be [test-key1, test-key10] - assertEquals(keys.get(0).getKeyName(), "test-key1"); - assertEquals(keys.get(1).getKeyName(), "test-key10"); - - // test -start option - out.reset(); - args = - new String[] {"key", "list", url + "/" + volumeName + "/" + bucketName, - "--length", "100", "--start", "test-key7"}; - execute(shell, args); - commandOutput = out.toString(); - keys = (List) JsonUtils.toJsonList(commandOutput, - KeyInfo.class); - - assertEquals(keys.get(0).getKeyName(), "test-key8"); - assertEquals(keys.get(1).getKeyName(), "test-key9"); - - // test error conditions - err.reset(); - args = - new String[] {"key", "list", url + "/" + volumeName + "/" + bucketName, - "--length", "-1"}; - executeWithError(shell, args, "the length should be a positive number"); - } - - private OzoneVolume creatVolume() throws OzoneException, IOException { - String volumeName = RandomStringUtils.randomNumeric(5) + "volume"; - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .setOwner("bilbo") - .setQuota("100TB") - .build(); - try { - client.createVolume(volumeName, volumeArgs); - } catch (Exception ex) { - Assert.assertEquals("PartialGroupNameException", - ex.getCause().getClass().getSimpleName()); - } - OzoneVolume volume = client.getVolumeDetails(volumeName); - - return volume; - } - - private OzoneBucket creatBucket() throws OzoneException, IOException { - OzoneVolume vol = creatVolume(); - String bucketName = RandomStringUtils.randomNumeric(5) + "bucket"; - vol.createBucket(bucketName); - OzoneBucket bucketInfo = 
vol.getBucket(bucketName); - - return bucketInfo; - } - - private OzoneKey createTestKey(OzoneBucket bucket) throws IOException { - String key = "key" + RandomStringUtils.randomNumeric(5); - String value = "value"; - - OzoneOutputStream keyOutputStream = - bucket.createKey(key, value.length()); - keyOutputStream.write(value.getBytes()); - keyOutputStream.close(); - - return bucket.getKey(key); - } - - @Test - public void testTokenCommands() throws Exception { - String omAdd = "--set=" + OZONE_OM_ADDRESS_KEY + "=" + getOmAddress(); - List shellCommands = new ArrayList<>(4); - // Case 1: Execution will fail when security is disabled. - shellCommands.add(new String[]{omAdd, "token", "get"}); - shellCommands.add(new String[]{omAdd, "token", "renew"}); - shellCommands.add(new String[]{omAdd, "token", "cancel"}); - shellCommands.add(new String[]{omAdd, "token", "print"}); - shellCommands.forEach(cmd -> execute(cmd, "Error:Token operations " + - "work only")); - - String security = "-D=" + OZONE_SECURITY_ENABLED_KEY + "=true"; - - // Case 2: Execution of get token will fail when security is enabled but - // OzoneManager is not setup correctly. - execute(new String[]{omAdd, security, - "token", "get"}, "Error: Get delegation token operation failed."); - - // Clear all commands. - shellCommands.clear(); - - // Case 3: Execution of renew/cancel/print token will fail as token file - // doesn't exist. - shellCommands.add(new String[]{omAdd, security, "token", "renew"}); - shellCommands.add(new String[]{omAdd, security, "token", "cancel"}); - shellCommands.add(new String[]{omAdd, security, "token", "print"}); - shellCommands.forEach(cmd -> execute(cmd, "token " + - "operation failed as token file:")); - - // Create corrupt token file. - File testPath = GenericTestUtils.getTestDir(); - Files.createDirectories(testPath.toPath()); - Path tokenFile = Paths.get(testPath.toString(), "token.txt"); - String question = RandomStringUtils.random(100); - Files.write(tokenFile, question.getBytes()); - - // Clear all commands. - shellCommands.clear(); - String file = "-t=" + tokenFile.toString(); - - // Case 4: Execution of renew/cancel/print token will fail if token file - // is corrupt. - shellCommands.add(new String[]{omAdd, security, "token", "renew", file}); - shellCommands.add(new String[]{omAdd, security, "token", - "cancel", file}); - shellCommands.add(new String[]{omAdd, security, "token", "print", file}); - shellCommands.forEach(cmd -> executeWithError(shell, cmd, - EOFException.class)); - } - - private void execute(String[] cmd, String msg) { - // verify the response output - execute(shell, cmd); - String output = err.toString(); - assertTrue(output.contains(msg)); - // reset stream - out.reset(); - err.reset(); - } - - /** - * Create a temporary file used for putting key. 
- * @return the created file's path string - * @throws Exception - */ - private String createTmpFile() throws Exception { - // write a new file that used for putting key - File tmpFile = new File(baseDir, - "/testfile-" + UUID.randomUUID().toString()); - FileOutputStream randFile = new FileOutputStream(tmpFile); - Random r = new Random(); - for (int x = 0; x < 10; x++) { - char c = (char) (r.nextInt(26) + 'a'); - randFile.write(c); - } - randFile.close(); - - return tmpFile.getAbsolutePath(); - } - - private String getOmAddress() { - List services; - try { - services = cluster.getOzoneManager().getServiceList(); - } catch (IOException e) { - fail("Could not get service list from OM"); - return null; - } - - return services.stream() - .filter(a -> HddsProtos.NodeType.OM.equals(a.getNodeType())) - .findFirst() - .map(s -> s.getServiceAddress(ServicePort.Type.RPC)) - .orElseThrow(IllegalStateException::new); - } - - private static void assertKeyNotExists(OzoneBucket bucket, String keyName) - throws IOException { - try { - bucket.getKey(keyName); - fail(String.format("Key %s should not exist, but it does", keyName)); - } catch (OMException e) { - Assert.assertEquals(KEY_NOT_FOUND, e.getResult()); - } - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java deleted file mode 100644 index e783dfd5718..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.web; - -import static com.google.common.base.Charsets.UTF_8; -import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; -import static org.apache.hadoop.ozone.OzoneConsts.CHUNK_SIZE; -import static org.junit.Assert.*; - -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.client.OzoneQuota; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.ozone.client.VolumeArgs; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.rpc.RpcClient; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.junit.rules.Timeout; - -import java.io.IOException; -import java.io.InputStream; -import java.util.HashMap; - -/** - * End-to-end testing of Ozone REST operations. - */ -public class TestOzoneRestWithMiniCluster { - /** - * Set the timeout for every test. - */ - @Rule - public Timeout testTimeout = new Timeout(300000); - - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static ClientProtocol client; - private static ReplicationFactor replicationFactor = ReplicationFactor.ONE; - private static ReplicationType replicationType = ReplicationType.RATIS; - - @Rule - public ExpectedException exception = ExpectedException.none(); - - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - client = new RpcClient(conf); - } - - @AfterClass - public static void shutdown() throws InterruptedException, IOException { - if (cluster != null) { - cluster.shutdown(); - } - client.close(); - } - - @Test - public void testCreateAndGetVolume() throws Exception { - createAndGetVolume(); - } - - @Test - public void testCreateAndGetBucket() throws Exception { - OzoneVolume volume = createAndGetVolume(); - createAndGetBucket(volume); - } - - @Test - public void testPutAndGetKey() throws Exception { - String keyName = nextId("key"); - String keyData = nextId("data"); - OzoneVolume volume = createAndGetVolume(); - OzoneBucket bucket = createAndGetBucket(volume); - putKey(bucket, keyName, keyData); - } - - private void putKey(OzoneBucket bucket, String keyName, String keyData) - throws IOException { - try ( - OzoneOutputStream ozoneOutputStream = bucket - .createKey(keyName, 0, replicationType, replicationFactor, - new HashMap<>()); - InputStream inputStream = IOUtils.toInputStream(keyData, UTF_8)) { - IOUtils.copy(inputStream, ozoneOutputStream); - } - try ( - InputStream inputStream = IOUtils.toInputStream(keyData, UTF_8); - OzoneInputStream ozoneInputStream = bucket.readKey(keyName)) { - IOUtils.contentEquals(ozoneInputStream, inputStream); - } - } - - @Test - public void testPutAndGetEmptyKey() throws Exception { - String keyName = nextId("key"); - String keyData = ""; - OzoneVolume volume = createAndGetVolume(); - OzoneBucket bucket = createAndGetBucket(volume); - 
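
Note that putKey() above calls IOUtils.contentEquals but discards the boolean, so a corrupted read-back would not actually fail the test. A version that asserts the round trip, using the same commons-io and client APIs (assertKeyContent is an illustrative name):

import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;

/** Reads a key back and asserts the content actually round-trips. */
static void assertKeyContent(OzoneBucket bucket, String keyName,
    String expected) throws IOException {
  try (InputStream expectedStream =
           IOUtils.toInputStream(expected, StandardCharsets.UTF_8);
       OzoneInputStream actual = bucket.readKey(keyName)) {
    // Unlike putKey() above, the comparison result is checked.
    assertTrue("key content differs for " + keyName,
        IOUtils.contentEquals(actual, expectedStream));
  }
}
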
putKey(bucket, keyName, keyData); - } - - @Test - public void testPutAndGetMultiChunkKey() throws Exception { - String keyName = nextId("key"); - int keyDataLen = 3 * CHUNK_SIZE; - String keyData = buildKeyData(keyDataLen); - OzoneVolume volume = createAndGetVolume(); - OzoneBucket bucket = createAndGetBucket(volume); - putKey(bucket, keyName, keyData); - } - - @Test - public void testPutAndGetMultiChunkKeyLastChunkPartial() throws Exception { - String keyName = nextId("key"); - int keyDataLen = (int)(2.5 * CHUNK_SIZE); - String keyData = buildKeyData(keyDataLen); - OzoneVolume volume = createAndGetVolume(); - OzoneBucket bucket = createAndGetBucket(volume); - putKey(bucket, keyName, keyData); - } - - @Test - public void testReplaceKey() throws Exception { - String keyName = nextId("key"); - int keyDataLen = (int)(2.5 * CHUNK_SIZE); - String keyData = buildKeyData(keyDataLen); - OzoneVolume volume = createAndGetVolume(); - OzoneBucket bucket = createAndGetBucket(volume); - putKey(bucket, keyName, keyData); - - // Replace key with data consisting of fewer chunks. - keyDataLen = (int)(1.5 * CHUNK_SIZE); - keyData = buildKeyData(keyDataLen); - putKey(bucket, keyName, keyData); - - // Replace key with data consisting of more chunks. - keyDataLen = (int)(3.5 * CHUNK_SIZE); - keyData = buildKeyData(keyDataLen); - putKey(bucket, keyName, keyData); - } - - private OzoneVolume createAndGetVolume() throws IOException { - String volumeName = nextId("volume"); - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .setOwner("bilbo") - .setQuota("100TB") - .setAdmin("hdfs") - .build(); - client.createVolume(volumeName, volumeArgs); - OzoneVolume volume = client.getVolumeDetails(volumeName); - assertEquals(volumeName, volume.getName()); - assertNotNull(volume); - assertEquals("bilbo", volume.getOwner()); - assertNotNull(volume.getQuota()); - assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(), - volume.getQuota()); - return volume; - } - - private OzoneBucket createAndGetBucket(OzoneVolume vol) throws IOException { - String bucketName = nextId("bucket"); - vol.createBucket(bucketName); - OzoneBucket bucket = vol.getBucket(bucketName); - assertNotNull(bucket); - assertEquals(bucketName, bucket.getName()); - return bucket; - } - - /** - * Creates sample key data of the specified length. The data is a string of - * printable ASCII characters. This makes it easy to debug through visual - * inspection of the chunk files if a test fails. - * - * @param keyDataLen desired length of key data - * @return string of printable ASCII characters of the specified length - */ - private static String buildKeyData(int keyDataLen) { - return new String(dataset(keyDataLen, 33, 93), UTF_8); - } - - /** - * Generates identifiers unique enough for use in tests, so that individual - * tests don't collide on each others' data in the shared mini-cluster. 
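
buildKeyData leans on ContractTestUtils.dataset(len, 33, 93), which cycles through printable ASCII so chunk files can be inspected by eye when a test fails. A dependency-free sketch of the same idea (printableData is an illustrative name):

/** Printable-ASCII test data, same idea as ContractTestUtils.dataset(len, 33, 93). */
static String printableData(int length) {
  StringBuilder sb = new StringBuilder(length);
  for (int i = 0; i < length; i++) {
    sb.append((char) (33 + (i % 93)));  // cycles '!' .. '}' for easy visual diffing
  }
  return sb.toString();
}
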
- * - * @param idPrefix prefix to put in front of ID - * @return unique ID generated by appending a suffix to the given prefix - */ - private static String nextId(String idPrefix) { - return (idPrefix + RandomStringUtils.random(5, true, true)).toLowerCase(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java deleted file mode 100644 index baf93a21e0e..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.TestOzoneHelper; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Test; -import org.junit.Assert; - -import org.junit.rules.Timeout; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * Test ozone volume in the distributed storage handler scenario. - */ -public class TestOzoneVolumes extends TestOzoneHelper { - private static final org.slf4j.Logger LOG = - LoggerFactory.getLogger(TestOzoneVolumes.class); - /** - * Set the timeout for every test. - */ - @Rule - public Timeout testTimeout = new Timeout(300000); - - private static MiniOzoneCluster cluster = null; - private static int port = 0; - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - port = cluster.getHddsDatanodes().get(0) - .getDatanodeDetails() - .getPort(DatanodeDetails.Port.Name.REST).getValue(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - /** - * Creates Volumes on Ozone Store. - * - * @throws IOException - */ - @Test - public void testCreateVolumes() throws IOException { - super.testCreateVolumes(port); - Assert.assertEquals(0, cluster.getOzoneManager() - .getMetrics().getNumVolumeCreateFails()); - } - - /** - * Create Volumes with Quota. - * - * @throws IOException - */ - @Test - public void testCreateVolumesWithQuota() throws IOException { - super.testCreateVolumesWithQuota(port); - Assert.assertEquals(0, cluster.getOzoneManager() - .getMetrics().getNumVolumeCreateFails()); - } - - /** - * Create Volumes with Invalid Quota. - * - * @throws IOException - */ - @Test - public void testCreateVolumesWithInvalidQuota() throws IOException { - super.testCreateVolumesWithInvalidQuota(port); - Assert.assertEquals(0, cluster.getOzoneManager() - .getMetrics().getNumVolumeCreateFails()); - } - - /** - * To create a volume a user name must be specified using OZONE_USER header. - * This test verifies that we get an error in case we call without a OZONE - * user name. - * - * @throws IOException - */ - @Test - public void testCreateVolumesWithInvalidUser() throws IOException { - super.testCreateVolumesWithInvalidUser(port); - Assert.assertEquals(0, cluster.getOzoneManager() - .getMetrics().getNumVolumeCreateFails()); - } - - /** - * Only Admins can create volumes in Ozone. This test uses simple userauth as - * backend and hdfs and root are admin users in the simple backend. - *
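
Each REST volume test in this deleted class pairs the HTTP call with an OM metrics check, asserting that getNumVolumeCreateFails() stayed at zero. The recurring assertion, pulled out for clarity (a sketch; assertNoVolumeCreateFailures is a hypothetical helper):

import static org.junit.Assert.assertEquals;
import org.apache.hadoop.ozone.MiniOzoneCluster;

/** Asserts the OzoneManager recorded no failed volume creations. */
static void assertNoVolumeCreateFailures(MiniOzoneCluster cluster) {
  assertEquals(0, cluster.getOzoneManager()
      .getMetrics().getNumVolumeCreateFails());
}
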
- * This test tries to create a volume as user bilbo. - * - * @throws IOException - */ - @Test - public void testCreateVolumesWithOutAdminRights() throws IOException { - super.testCreateVolumesWithOutAdminRights(port); - Assert.assertEquals(0, cluster.getOzoneManager() - .getMetrics().getNumVolumeCreateFails()); - } - - /** - * Create a bunch of volumes in a loop. - * - * @throws IOException - */ - @Test - public void testCreateVolumesInLoop() throws IOException { - super.testCreateVolumesInLoop(port); - Assert.assertEquals(0, cluster.getOzoneManager() - .getMetrics().getNumVolumeCreateFails()); - } - /** - * Get volumes owned by the user. - * - * @throws IOException - */ - @Ignore("Test is ignored for time being, to be enabled after security.") - public void testGetVolumesByUser() throws IOException { - testGetVolumesByUser(port); - } - - /** - * Admins can read volumes belonging to other users. - * - * @throws IOException - */ - @Ignore("Test is ignored for time being, to be enabled after security.") - public void testGetVolumesOfAnotherUser() throws IOException { - super.testGetVolumesOfAnotherUser(port); - } - - /** - * if you try to read volumes belonging to another user, - * then server always ignores it. - * - * @throws IOException - */ - @Ignore("Test is ignored for time being, to be enabled after security.") - public void testGetVolumesOfAnotherUserShouldFail() throws IOException { - super.testGetVolumesOfAnotherUserShouldFail(port); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java deleted file mode 100644 index 63215e0182e..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java +++ /dev/null @@ -1,118 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.web; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.rest.headers.Header; -import org.apache.hadoop.util.Time; -import org.apache.http.HttpResponse; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClients; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -import javax.ws.rs.core.HttpHeaders; -import java.io.IOException; -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.Locale; - -import static java.net.HttpURLConnection.HTTP_CREATED; -import static org.apache.hadoop.ozone.web.utils.OzoneUtils.getRequestID; -import static org.junit.Assert.assertEquals; - -/** - * Test Ozone Access through REST protocol. - */ -public class TestOzoneWebAccess { - /** - * Set the timeout for every test. - */ - @Rule - public Timeout testTimeout = new Timeout(300000); - - private static MiniOzoneCluster cluster; - private static int port; - - /** - * Create a MiniDFSCluster for testing. - * - * Ozone is made active by setting OZONE_ENABLED = true - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - port = cluster.getHddsDatanodes().get(0) - .getDatanodeDetails().getPort( - DatanodeDetails.Port.Name.REST).getValue(); - } - - /** - * shutdown MiniOzoneCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - /** - * Send a vaild Ozone Request. - * - * @throws IOException - */ - @Test - public void testOzoneRequest() throws IOException { - SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US); - CloseableHttpClient client = HttpClients.createDefault(); - String volumeName = getRequestID().toLowerCase(Locale.US); - try { - HttpPost httppost = new HttpPost( - String.format("http://localhost:%d/%s", port, volumeName)); - - httppost.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - httppost.addHeader(HttpHeaders.DATE, - format.format(new Date(Time.now()))); - httppost.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " + - OzoneConsts.OZONE_SIMPLE_HDFS_USER); - httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER); - - HttpResponse response = client.execute(httppost); - assertEquals(response.toString(), HTTP_CREATED, - response.getStatusLine().getStatusCode()); - } finally { - client.close(); - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java deleted file mode 100644 index 1bfd640297c..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java +++ /dev/null @@ -1,349 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.client; - -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.client.BucketArgs; -import org.apache.hadoop.ozone.client.VolumeArgs; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.client.rest.RestClient; -import org.apache.hadoop.ozone.client.rpc.RpcClient; -import org.apache.hadoop.ozone.web.request.OzoneQuota; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.util.Time; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import java.io.IOException; -import java.net.URISyntaxException; -import java.text.ParseException; -import java.util.Arrays; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.TimeoutException; -import java.util.stream.Collectors; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.junit.Assume.assumeFalse; - -/** - * Test Ozone Bucket Lifecycle. - */ -@RunWith(value = Parameterized.class) -public class TestBuckets { - /** - * Set the timeout for every test. - */ - @Rule - public Timeout testTimeout = new Timeout(300000); - - private static MiniOzoneCluster cluster = null; - private static ClientProtocol client = null; - private static OzoneConfiguration conf; - - @Parameterized.Parameters - public static Collection clientProtocol() { - Object[][] params = new Object[][] { - {RpcClient.class}, - {RestClient.class}}; - return Arrays.asList(params); - } - - @SuppressWarnings("visibilitymodifier") - @Parameterized.Parameter - public static Class clientProtocol; - - /** - * Create a MiniDFSCluster for testing. - *
<p>
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() - throws IOException, URISyntaxException, OzoneException, TimeoutException, - InterruptedException { - conf = new OzoneConfiguration(); - - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .build(); - cluster.waitForClusterToBeReady(); - } - - @Before - public void setup() throws Exception { - if (clientProtocol.equals(RestClient.class)) { - client = new RestClient(conf); - } else { - client = new RpcClient(conf); - } - } - - /** - * shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testCreateBucket() throws Exception { - runTestCreateBucket(client); - } - - static void runTestCreateBucket(ClientProtocol protocol) - throws IOException { - String volumeName = OzoneUtils.getRequestID().toLowerCase(); - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .setOwner("bilbo") - .setQuota("100TB") - .setAdmin("hdfs") - .build(); - protocol.createVolume(volumeName, volumeArgs); - OzoneVolume vol = protocol.getVolumeDetails(volumeName); - String[] acls = {"user:frodo:rw", "user:samwise:rw"}; - - // create 10 buckets under the same volume - for (int x = 0; x < 10; x++) { - long currentTime = Time.now(); - String bucketName = OzoneUtils.getRequestID().toLowerCase(); - - List aclList = - Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl)) - .collect(Collectors.toList()); - BucketArgs bucketArgs = BucketArgs.newBuilder() - .setAcls(aclList) - .build(); - vol.createBucket(bucketName, bucketArgs); - OzoneBucket bucket = vol.getBucket(bucketName); - assertEquals(bucket.getName(), bucketName); - - // verify the bucket creation time - assertTrue((bucket.getCreationTime() / 1000) >= (currentTime / 1000)); - } - protocol.close(); - - assertEquals(vol.getName(), volumeName); - assertEquals(vol.getAdmin(), "hdfs"); - assertEquals(vol.getOwner(), "bilbo"); - assertEquals(vol.getQuota(), OzoneQuota.parseQuota("100TB").sizeInBytes()); - - // Test creating a bucket with an invalid bucket name. We do not use a - // Rule here because the test method is static.
- try { - String invalidBucketName = "#" + OzoneUtils.getRequestID().toLowerCase(); - vol.createBucket(invalidBucketName); - fail("Expect the bucket creation to fail because the" - + " bucket name starts with an invalid char #"); - } catch (Exception e) { - assertTrue(e.getMessage() - .contains("Bucket or Volume name has an unsupported character : #")); - } - } - - @Test - public void testAddBucketAcls() throws Exception { - assumeFalse("Rest Client does not support ACL", - clientProtocol.equals(RestClient.class)); - runTestAddBucketAcls(client); - } - - static void runTestAddBucketAcls(ClientProtocol protocol) - throws OzoneException, IOException, ParseException { - String volumeName = OzoneUtils.getRequestID().toLowerCase(); - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .setOwner("bilbo") - .setQuota("100TB") - .setAdmin("hdfs") - .build(); - protocol.createVolume(volumeName, volumeArgs); - OzoneVolume vol = protocol.getVolumeDetails(volumeName); - String[] acls = {"user:frodo:rw", "user:samwise:rw"}; - String bucketName = OzoneUtils.getRequestID().toLowerCase(); - vol.createBucket(bucketName); - OzoneBucket bucket = vol.getBucket(bucketName); - - List aclList = - Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl)) - .collect(Collectors.toList()); - int numAcls = bucket.getAcls().size(); - for (OzoneAcl ozoneAcl : aclList) { - Assert.assertTrue(bucket.addAcls(ozoneAcl)); - } - OzoneBucket updatedBucket = vol.getBucket(bucketName); - assertEquals(updatedBucket.getAcls().size(), 2 + numAcls); - // verify the creation time is not missing after the update operation - assertTrue( - (updatedBucket.getCreationTime()) / 1000 >= 0); - protocol.close(); - } - - @Test - public void testRemoveBucketAcls() throws Exception { - assumeFalse("Rest Client does not support ACL", - clientProtocol.equals(RestClient.class)); - runTestRemoveBucketAcls(client); - } - - static void runTestRemoveBucketAcls(ClientProtocol protocol) - throws OzoneException, IOException, ParseException { - String volumeName = OzoneUtils.getRequestID().toLowerCase(); - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .setOwner("bilbo") - .setQuota("100TB") - .setAdmin("hdfs") - .build(); - protocol.createVolume(volumeName, volumeArgs); - OzoneVolume vol = protocol.getVolumeDetails(volumeName); - String[] acls = {"user:frodo:rw", "user:samwise:rw"}; - String bucketName = OzoneUtils.getRequestID().toLowerCase(); - List aclList = - Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl)) - .collect(Collectors.toList()); - vol.createBucket(bucketName); - OzoneBucket bucket = vol.getBucket(bucketName); - int numAcls = bucket.getAcls().size(); - for (OzoneAcl ozoneAcl : aclList) { - Assert.assertTrue(bucket.addAcls(ozoneAcl)); - } - assertEquals(bucket.getAcls().size(), 2 + numAcls); - for (OzoneAcl ozoneAcl : aclList) { - Assert.assertTrue(bucket.removeAcls(ozoneAcl)); - } - OzoneBucket updatedBucket = vol.getBucket(bucketName); - - // We removed all acls - assertEquals(updatedBucket.getAcls().size(), numAcls); - // verify the creation time is not missing after the update operation - assertTrue( - (updatedBucket.getCreationTime() / 1000) >= 0); - protocol.close(); - } - - @Test - public void testDeleteBucket() throws OzoneException, IOException { - runTestDeleteBucket(client); - } - - static void runTestDeleteBucket(ClientProtocol protocol) - throws OzoneException, IOException { - String volumeName = OzoneUtils.getRequestID().toLowerCase(); - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .setOwner("bilbo") - .setQuota("100TB") -
.setAdmin("hdfs") - .build(); - protocol.createVolume(volumeName, volumeArgs); - OzoneVolume vol = protocol.getVolumeDetails(volumeName); - String[] acls = {"user:frodo:rw", "user:samwise:rw"}; - String bucketName = OzoneUtils.getRequestID().toLowerCase(); - List aclList = - Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl)) - .collect(Collectors.toList()); - BucketArgs bucketArgs = BucketArgs.newBuilder() - .setAcls(aclList) - .build(); - vol.createBucket(bucketName, bucketArgs); - vol.deleteBucket(bucketName); - try { - OzoneBucket updatedBucket = vol.getBucket(bucketName); - fail("Fetching deleted bucket, Should not reach here."); - } catch (Exception ex) { - // must throw - assertNotNull(ex); - } - protocol.close(); - } - - @Test - public void testListBucket() throws Exception { - runTestListBucket(client); - } - - static void runTestListBucket(ClientProtocol protocol) - throws OzoneException, IOException, ParseException { - String volumeName = OzoneUtils.getRequestID().toLowerCase(); - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .setOwner("bilbo") - .setQuota("100TB") - .setAdmin("hdfs") - .build(); - protocol.createVolume(volumeName, volumeArgs); - OzoneVolume vol = protocol.getVolumeDetails(volumeName); - String[] acls = {"user:frodo:rw", "user:samwise:rw"}; - List aclList = - Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl)) - .collect(Collectors.toList()); - - long currentTime = Time.now(); - for (int x = 0; x < 10; x++) { - String bucketName = "listbucket-test-" + x; - BucketArgs bucketArgs = BucketArgs.newBuilder() - .setAcls(aclList) - .build(); - vol.createBucket(bucketName, bucketArgs); - } - Iterator bucketIterator = vol.listBuckets(null); - int count = 0; - - while (bucketIterator.hasNext()) { - assertTrue((bucketIterator.next().getCreationTime() - / 1000) >= (currentTime / 1000)); - count++; - } - assertEquals(count, 10); - - bucketIterator = vol.listBuckets(null, "listbucket-test-4"); - assertEquals(getSize(bucketIterator), 5); - - bucketIterator = vol.listBuckets(null, "listbucket-test-3"); - assertEquals(getSize(bucketIterator), 6); - - protocol.close(); - } - - private static int getSize(Iterator bucketIterator) { - int count = 0; - while (bucketIterator.hasNext()) { - count++; - bucketIterator.next(); - } - return count; - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java deleted file mode 100644 index efb06983137..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.web.client; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.RatisTestHelper; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.client.rest.RestClient; -import org.apache.hadoop.ozone.client.rpc.RpcClient; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; - -/** The same as {@link TestBuckets} except that this test is Ratis enabled. */ -@Ignore("Disabling Ratis tests for pipeline work.") -@RunWith(value = Parameterized.class) -public class TestBucketsRatis { - @Rule - public Timeout testTimeout = new Timeout(300000); - - private static RatisTestHelper.RatisTestSuite suite; - private static ClientProtocol client; - private static OzoneConfiguration conf; - - @Parameterized.Parameters - public static Collection clientProtocol() { - Object[][] params = new Object[][] { - {RpcClient.class}, - {RestClient.class}}; - return Arrays.asList(params); - } - - @Parameterized.Parameter - @SuppressWarnings("visibilitymodifier") - public static Class clientProtocol; - - @BeforeClass - public static void init() throws Exception { - suite = new RatisTestHelper.RatisTestSuite(); - conf = suite.getConf(); - } - - @Before - public void setup() throws Exception { - if (clientProtocol.equals(RestClient.class)) { - client = new RestClient(conf); - } else { - client = new RpcClient(conf); - } - } - - @AfterClass - public static void shutdown() { - if (suite != null) { - suite.close(); - } - } - - @Test - public void testCreateBucket() throws Exception { - TestBuckets.runTestCreateBucket(client); - } - - @Test - public void testAddBucketAcls() throws Exception { - TestBuckets.runTestAddBucketAcls(client); - } - - @Test - public void testRemoveBucketAcls() throws Exception { - TestBuckets.runTestRemoveBucketAcls(client); - } - - @Test - public void testDeleteBucket() throws OzoneException, IOException { - TestBuckets.runTestDeleteBucket(client); - } - @Test - public void testListBucket() throws Exception { - TestBuckets.runTestListBucket(client); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java deleted file mode 100644 index c4f10f7468f..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java +++ /dev/null @@ -1,734 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.client; - -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import org.apache.commons.codec.digest.DigestUtils; -import org.apache.commons.collections.IteratorUtils; -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.ozone.OzoneTestUtils; -import org.apache.hadoop.ozone.client.BucketArgs; -import org.apache.hadoop.ozone.client.VolumeArgs; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.client.rpc.RpcClient; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; - -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.InputStream; -import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.Random; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import static org.apache.hadoop.hdds - .HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_STALENODE_INTERVAL; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import 
static org.junit.Assert.fail; - -/** - * Test Ozone Key Lifecycle. - */ -public class TestKeys { - /** - * Set the timeout for every test. - */ - @Rule - public Timeout testTimeout = new Timeout(300000); - - private static OzoneConfiguration conf; - private static MiniOzoneCluster ozoneCluster = null; - private static String path; - private static ClientProtocol client = null; - private static long currentTime; - private static ReplicationFactor replicationFactor = ReplicationFactor.ONE; - private static ReplicationType replicationType = ReplicationType.STAND_ALONE; - - - /** - * Create a MiniDFSCluster for testing. - * - * @throws IOException - */ - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - - // Set short block deleting service interval to speed up deletions. - conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, - 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); - - path = GenericTestUtils.getTempPath(TestKeys.class.getSimpleName()); - Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG); - - ozoneCluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1) - .setHbInterval(1000) - .setHbProcessorInterval(1000) - .build(); - ozoneCluster.waitForClusterToBeReady(); - client = new RpcClient(conf); - currentTime = Time.now(); - } - - /** - * shutdown MiniDFSCluster. - */ - @After - public void shutdown() { - if (ozoneCluster != null) { - ozoneCluster.shutdown(); - } - } - - /** - * Creates a file with random data. - * - * @return File. - */ - static File createRandomDataFile(String dir, String fileName, long size) - throws IOException { - File tmpDir = new File(dir); - FileUtils.forceMkdir(tmpDir); - File tmpFile = new File(tmpDir, fileName); - try (FileOutputStream randFile = new FileOutputStream(tmpFile)) { - Random r = new Random(); - for (int x = 0; x < size; x++) { - char c = (char) (r.nextInt(26) + 'a'); - randFile.write(c); - } - - } catch (IOException e) { - fail(e.getMessage()); - } - return tmpFile; - } - - /** - * This function generates a multi-part key whose parts are delimited by a - * given delimiter. Each part is a random string of random length between - * 0 and 4 (or exactly 5 when the key has a single part), and the number of - * parts is between 1 and 5. - * - * @param delimiter delimiter used to separate the parts of the key - * @return key composed of multiple parts delimited by the given delimiter - */ - static String getMultiPartKey(String delimiter) { - int numParts = RandomUtils.nextInt(0, 5) + 1; - String[] nameParts = new String[numParts]; - for (int i = 0; i < numParts; i++) { - int stringLength = numParts == 1 ?
5 : RandomUtils.nextInt(0, 5); - nameParts[i] = RandomStringUtils.randomAlphanumeric(stringLength); - } - return StringUtils.join(delimiter, nameParts); - } - - static class PutHelper { - private final ClientProtocol client; - private final String dir; - private final String keyName; - - private OzoneVolume vol; - private OzoneBucket bucket; - private File file; - - PutHelper(ClientProtocol client, String dir) { - this(client, dir, OzoneUtils.getRequestID().toLowerCase()); - } - - PutHelper(ClientProtocol client, String dir, String key) { - this.client = client; - this.dir = dir; - this.keyName = key; - } - - public OzoneVolume getVol() { - return vol; - } - - public OzoneBucket getBucket() { - return bucket; - } - - public File getFile() { - return file; - } - - /** - * This function is reused in all other tests. - * - * @return Returns the name of the new key that was created. - * @throws OzoneException - */ - private String putKey() throws Exception { - String volumeName = OzoneUtils.getRequestID().toLowerCase(); - - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .setOwner("bilbo") - .setQuota("100TB") - .setAdmin("hdfs") - .build(); - client.createVolume(volumeName, volumeArgs); - vol = client.getVolumeDetails(volumeName); - String[] acls = {"user:frodo:rw", "user:samwise:rw"}; - - String bucketName = OzoneUtils.getRequestID().toLowerCase(); - List aclList = - Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl)) - .collect(Collectors.toList()); - BucketArgs bucketArgs = BucketArgs.newBuilder() - .setAcls(aclList) - .build(); - vol.createBucket(bucketName, bucketArgs); - bucket = vol.getBucket(bucketName); - - String fileName = OzoneUtils.getRequestID().toLowerCase(); - - file = createRandomDataFile(dir, fileName, 1024); - - try ( - OzoneOutputStream ozoneOutputStream = bucket - .createKey(keyName, 0, replicationType, replicationFactor, - new HashMap<>()); - InputStream fileInputStream = new FileInputStream(file)) { - IOUtils.copy(fileInputStream, ozoneOutputStream); - } - return keyName; - } - } - - @Test - public void testPutKey() throws Exception { - // Test non-delimited keys - runTestPutKey(new PutHelper(client, path)); - // Test key delimited by a random delimiter - String delimiter = RandomStringUtils.randomAscii(1); - runTestPutKey(new PutHelper(client, path, - getMultiPartKey(delimiter))); - } - - @SuppressWarnings("emptyblock") - static void runTestPutKey(PutHelper helper) throws Exception { - final ClientProtocol helperClient = helper.client; - helper.putKey(); - assertNotNull(helper.getBucket()); - assertNotNull(helper.getFile()); - List keyList = helperClient - .listKeys(helper.getVol().getName(), helper.getBucket().getName(), null, - null, 10); - Assert.assertEquals(1, keyList.size()); - - // test list key using a more efficient call - String newkeyName = OzoneUtils.getRequestID().toLowerCase(); - OzoneOutputStream ozoneOutputStream = helperClient - .createKey(helper.getVol().getName(), helper.getBucket().getName(), - newkeyName, 0, replicationType, replicationFactor, new HashMap<>()); - ozoneOutputStream.close(); - keyList = helperClient - .listKeys(helper.getVol().getName(), helper.getBucket().getName(), null, - null, 10); - Assert.assertEquals(2, keyList.size()); - - // test new put key with invalid volume/bucket name - OzoneTestUtils.expectOmException(ResultCodes.VOLUME_NOT_FOUND, () -> { - - try (OzoneOutputStream oos = helperClient - .createKey("invalid-volume", helper.getBucket().getName(), newkeyName, - 0, replicationType, replicationFactor, new HashMap<>())) 
{ - } - }); - - OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND, () -> { - try (OzoneOutputStream oos = helperClient - .createKey(helper.getVol().getName(), "invalid-bucket", newkeyName, 0, - replicationType, replicationFactor, new HashMap<>())) { - } - }); - } - - private static void restartDatanode(MiniOzoneCluster cluster, int datanodeIdx) - throws Exception { - cluster.restartHddsDatanode(datanodeIdx, true); - } - - @Test - public void testPutAndGetKeyWithDnRestart() throws Exception { - runTestPutAndGetKeyWithDnRestart( - new PutHelper(client, path), ozoneCluster); - String delimiter = RandomStringUtils.randomAscii(1); - runTestPutAndGetKeyWithDnRestart( - new PutHelper(client, path, - getMultiPartKey(delimiter)), ozoneCluster); - } - - static void runTestPutAndGetKeyWithDnRestart( - PutHelper helper, MiniOzoneCluster cluster) throws Exception { - String keyName = helper.putKey(); - assertNotNull(helper.getBucket()); - assertNotNull(helper.getFile()); - - // restart the datanode - restartDatanode(cluster, 0); - // verify getKey after the datanode restart - String newFileName = helper.dir + "/" - + OzoneUtils.getRequestID().toLowerCase(); - Path newPath = Paths.get(newFileName); - try ( - FileOutputStream newOutputStream = new FileOutputStream( - newPath.toString()); - OzoneInputStream ozoneInputStream = helper.client - .getKey(helper.getVol().getName(), helper.getBucket().getName(), - keyName)) { - IOUtils.copy(ozoneInputStream, newOutputStream); - } - - try ( - FileInputStream original = new FileInputStream(helper.getFile()); - FileInputStream downloaded = new FileInputStream(newPath.toFile())) { - String originalHash = DigestUtils.sha256Hex(original); - String downloadedHash = DigestUtils.sha256Hex(downloaded); - assertEquals( - "Sha256 does not match between original file and downloaded file.", - originalHash, downloadedHash); - } - } - - @Test - public void testPutAndGetKey() throws Exception { - runTestPutAndGetKey(new PutHelper(client, path)); - String delimiter = RandomStringUtils.randomAscii(1); - runTestPutAndGetKey(new PutHelper(client, path, - getMultiPartKey(delimiter))); - } - - static void runTestPutAndGetKey(PutHelper helper) throws Exception { - final ClientProtocol helperClient = helper.client; - - String keyName = helper.putKey(); - assertNotNull(helper.getBucket()); - assertNotNull(helper.getFile()); - - final String newFileName1 = helper.dir + "/" - + OzoneUtils.getRequestID().toLowerCase(); - final String newFileName2 = helper.dir + "/" - + OzoneUtils.getRequestID().toLowerCase(); - - Path newPath1 = Paths.get(newFileName1); - Path newPath2 = Paths.get(newFileName2); - - try ( - FileOutputStream newOutputStream = new FileOutputStream( - newPath1.toString()); - OzoneInputStream ozoneInputStream = helper.getBucket() - .readKey(keyName)) { - IOUtils.copy(ozoneInputStream, newOutputStream); - } - - // test get key using a more efficient call - try ( - FileOutputStream newOutputStream = new FileOutputStream( - newPath2.toString()); - OzoneInputStream ozoneInputStream = helper.getBucket() - .readKey(keyName)) { - IOUtils.copy(ozoneInputStream, newOutputStream); - } - - try (FileInputStream original = new FileInputStream(helper.getFile()); - FileInputStream downloaded1 = new FileInputStream(newPath1.toFile()); - FileInputStream downloaded2 = new FileInputStream(newPath1.toFile())) { - String originalHash = DigestUtils.sha256Hex(original); - String downloadedHash1 = DigestUtils.sha256Hex(downloaded1); - String downloadedHash2 = 
DigestUtils.sha256Hex(downloaded2); - - assertEquals( - "Sha256 does not match between original file and downloaded file.", - originalHash, downloadedHash1); - assertEquals( - "Sha256 does not match between original file and downloaded file.", - originalHash, downloadedHash2); - - // test new get key with invalid volume/bucket name - OzoneTestUtils.expectOmException(ResultCodes.KEY_NOT_FOUND, - () -> helperClient.getKey( - "invalid-volume", helper.getBucket().getName(), keyName)); - - OzoneTestUtils.expectOmException(ResultCodes.KEY_NOT_FOUND, - () -> helperClient.getKey( - helper.getVol().getName(), "invalid-bucket", keyName)); - } - } - - @Test - public void testPutAndDeleteKey() throws Exception { - runTestPutAndDeleteKey(new PutHelper(client, path)); - String delimiter = RandomStringUtils.randomAscii(1); - runTestPutAndDeleteKey(new PutHelper(client, path, - getMultiPartKey(delimiter))); - } - - static void runTestPutAndDeleteKey(PutHelper helper) throws Exception { - String keyName = helper.putKey(); - assertNotNull(helper.getBucket()); - assertNotNull(helper.getFile()); - helper.getBucket().deleteKey(keyName); - - OzoneTestUtils.expectOmException(ResultCodes.KEY_NOT_FOUND, () -> { - helper.getBucket().getKey(keyName); - }); - } - - @Test - public void testPutAndListKey() throws Exception { - runTestPutAndListKey(new PutHelper(client, path)); - String delimiter = RandomStringUtils.randomAscii(1); - runTestPutAndListKey(new PutHelper(client, path, - getMultiPartKey(delimiter))); - } - - static void runTestPutAndListKey(PutHelper helper) throws Exception { - ClientProtocol helperClient = helper.client; - helper.putKey(); - assertNotNull(helper.getBucket()); - assertNotNull(helper.getFile()); - - // add keys [list-key0, list-key1, ..., list-key9] - for (int x = 0; x < 10; x++) { - String newkeyName = "list-key" + x; - try ( - OzoneOutputStream ozoneOutputStream = helper.getBucket() - .createKey(newkeyName, 0, replicationType, replicationFactor, - new HashMap<>()); - InputStream fileInputStream = new FileInputStream(helper.getFile())) { - IOUtils.copy(fileInputStream, ozoneOutputStream); - } - } - - List keyList1 = - IteratorUtils.toList(helper.getBucket().listKeys(null, null)); - // test list key using a more efficient call - List keyList2 = helperClient - .listKeys(helper.getVol().getName(), helper.getBucket().getName(), null, - null, 100); - - Assert.assertEquals(11, keyList1.size()); - Assert.assertEquals(11, keyList2.size()); - // Verify the key creation/modification time. Here we compare the time in - // second unit since the date string reparsed to millisecond will - // lose precision. 
- for (OzoneKey key : keyList1) { - assertTrue((key.getCreationTime() / 1000) >= (currentTime / 1000)); - assertTrue((key.getModificationTime() / 1000) >= (currentTime / 1000)); - } - - for (OzoneKey key : keyList2) { - assertTrue((key.getCreationTime() / 1000) >= (currentTime / 1000)); - assertTrue((key.getModificationTime() / 1000) >= (currentTime / 1000)); - } - - // test maxLength parameter of list keys - keyList2 = helperClient - .listKeys(helper.getVol().getName(), helper.getBucket().getName(), null, - null, 1); - Assert.assertEquals(1, keyList2.size()); - - // test startKey parameter of list keys - keyList1 = IteratorUtils - .toList(helper.getBucket().listKeys("list-key", "list-key4")); - keyList2 = helperClient - .listKeys(helper.getVol().getName(), helper.getBucket().getName(), - "list-key", "list-key4", 100); - Assert.assertEquals(5, keyList1.size()); - Assert.assertEquals(5, keyList2.size()); - - // test prefix parameter of list keys - keyList1 = - IteratorUtils.toList(helper.getBucket().listKeys("list-key2", null)); - keyList2 = helperClient - .listKeys(helper.getVol().getName(), helper.getBucket().getName(), - "list-key2", null, 100); - Assert.assertTrue( - keyList1.size() == 1 && keyList1.get(0).getName().equals("list-key2")); - Assert.assertTrue( - keyList2.size() == 1 && keyList2.get(0).getName().equals("list-key2")); - - // test new list keys with invalid volume/bucket name - OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND, () -> { - helperClient.listKeys("invalid-volume", helper.getBucket().getName(), - null, null, 100); - }); - - OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND, () -> { - helperClient.listKeys(helper.getVol().getName(), "invalid-bucket", null, - null, 100); - }); - } - - @Test - public void testGetKeyInfo() throws Exception { - runTestGetKeyInfo(new PutHelper(client, path)); - String delimiter = RandomStringUtils.randomAscii(1); - runTestGetKeyInfo(new PutHelper(client, path, - getMultiPartKey(delimiter))); - } - - static void runTestGetKeyInfo(PutHelper helper) throws Exception { - String keyName = helper.putKey(); - assertNotNull(helper.getBucket()); - assertNotNull(helper.getFile()); - - OzoneKey keyInfo = helper.getBucket().getKey(keyName); - assertNotNull(keyInfo); - assertEquals(keyName, keyInfo.getName()); - - // Compare the time in second unit since the date string reparsed to - // millisecond will lose precision. - Assert - .assertTrue((keyInfo.getCreationTime() / 1000) >= (currentTime / 1000)); - Assert.assertTrue( - (keyInfo.getModificationTime() / 1000) >= (currentTime / 1000)); - } - - // Volume, bucket, keys info that helps for test create/delete keys. 
- private static class BucketKeys { - - private Map, List> buckets; - - BucketKeys() { - buckets = Maps.newHashMap(); - } - - void addKey(String volume, String bucket, String key) { - // check if this bucket exists - for (Map.Entry, List> entry : - buckets.entrySet()) { - if (entry.getKey().getValue().equals(bucket)) { - entry.getValue().add(key); - return; - } - } - - // bucket not exist - Pair newBucket = new ImmutablePair(volume, bucket); - List keyList = Lists.newArrayList(); - keyList.add(key); - buckets.put(newBucket, keyList); - } - - Set> getAllBuckets() { - return buckets.keySet(); - } - - List getBucketKeys(String bucketName) { - for (Map.Entry, List> entry : buckets - .entrySet()) { - if (entry.getKey().getValue().equals(bucketName)) { - return entry.getValue(); - } - } - return Lists.newArrayList(); - } - - int totalNumOfKeys() { - int count = 0; - for (Map.Entry, List> entry : buckets - .entrySet()) { - count += entry.getValue().size(); - } - return count; - } - } - - private int countOmKeys(OzoneManager om) throws IOException { - int totalCount = 0; - List volumes = - om.listAllVolumes(null, null, Integer.MAX_VALUE); - for (OmVolumeArgs volume : volumes) { - List buckets = - om.listBuckets(volume.getVolume(), null, null, Integer.MAX_VALUE); - for (OmBucketInfo bucket : buckets) { - List keys = om.listKeys(bucket.getVolumeName(), - bucket.getBucketName(), null, null, Integer.MAX_VALUE); - totalCount += keys.size(); - } - } - return totalCount; - } - - @Test - @Ignore("Until delete background service is fixed.") - public void testDeleteKey() throws Exception { - OzoneManager ozoneManager = ozoneCluster.getOzoneManager(); - // To avoid interference from other test cases, - // we collect number of existing keys at the beginning - int numOfExistedKeys = countOmKeys(ozoneManager); - - // Keep tracking bucket keys info while creating them - PutHelper helper = new PutHelper(client, path); - BucketKeys bucketKeys = new BucketKeys(); - for (int i = 0; i < 20; i++) { - String keyName = helper.putKey(); - bucketKeys.addKey(helper.getVol().getName(), helper.getBucket().getName(), - keyName); - } - - // There should be 20 keys in the buckets we just created. - Assert.assertEquals(20, bucketKeys.totalNumOfKeys()); - - int numOfCreatedKeys = 0; - OzoneContainer cm = ozoneCluster.getHddsDatanodes().get(0) - .getDatanodeStateMachine().getContainer(); - - // Expected to delete chunk file list. - List expectedChunkFiles = Lists.newArrayList(); - // Iterate over all buckets, and list all keys in each bucket, - // count the total number of created keys. - Set> buckets = bucketKeys.getAllBuckets(); - for (Pair buk : buckets) { - List createdKeys = - ozoneManager.listKeys(buk.getKey(), buk.getValue(), null, null, 20); - - // Memorize chunks that has been created, - // so we can verify actual deletions at DN side later. 
- for (OmKeyInfo keyInfo : createdKeys) { - List locations = - keyInfo.getLatestVersionLocations().getLocationList(); - OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(), - ozoneCluster.getStorageContainerManager()); - for (OmKeyLocationInfo location : locations) { - KeyValueHandler keyValueHandler = (KeyValueHandler) cm - .getDispatcher().getHandler(ContainerProtos.ContainerType - .KeyValueContainer); - KeyValueContainer container = (KeyValueContainer) cm.getContainerSet() - .getContainer(location.getBlockID().getContainerID()); - BlockData blockInfo = keyValueHandler.getBlockManager() - .getBlock(container, location.getBlockID()); - KeyValueContainerData containerData = - (KeyValueContainerData) container.getContainerData(); - File dataDir = new File(containerData.getChunksPath()); - for (ContainerProtos.ChunkInfo chunkInfo : blockInfo.getChunks()) { - File chunkFile = dataDir.toPath() - .resolve(chunkInfo.getChunkName()).toFile(); - System.out.println("Chunk File created: " - + chunkFile.getAbsolutePath()); - Assert.assertTrue(chunkFile.exists()); - expectedChunkFiles.add(chunkFile); - } - } - } - numOfCreatedKeys += createdKeys.size(); - } - - // Ensure all keys are created. - Assert.assertEquals(20, numOfCreatedKeys); - - // Ensure all keys are visible from OM. - // Total number should be numOfCreated + numOfExisted - Assert.assertEquals(20 + numOfExistedKeys, countOmKeys(ozoneManager)); - - // Delete 10 keys - int delCount = 20; - Set> allBuckets = bucketKeys.getAllBuckets(); - for (Pair bucketInfo : allBuckets) { - List bks = bucketKeys.getBucketKeys(bucketInfo.getValue()); - for (String keyName : bks) { - if (delCount > 0) { - OmKeyArgs arg = - new OmKeyArgs.Builder().setVolumeName(bucketInfo.getKey()) - .setBucketName(bucketInfo.getValue()).setKeyName(keyName) - .build(); - ozoneManager.deleteKey(arg); - delCount--; - } - } - } - - // It should be pretty quick that keys are removed from OM namespace, - // because actual deletion happens in async mode. - GenericTestUtils.waitFor(() -> { - try { - int num = countOmKeys(ozoneManager); - return num == (numOfExistedKeys); - } catch (IOException e) { - return false; - } - }, 1000, 10000); - - // It might take a while until all blocks are actually deleted, - // verify all chunk files created earlier are removed from disk. - GenericTestUtils.waitFor( - () -> expectedChunkFiles.stream().allMatch(file -> !file.exists()), - 1000, 60000); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java deleted file mode 100644 index 2e8f5394526..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
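The put/get round trip that PutHelper packaged for every TestKeys case reduces, over RPC, to the bucket-level stream API the deleted tests were already calling. A sketch with placeholder names, reusing the createKey signature from the code above; TestKeysRatis below reruns the same scenarios on Ratis pipelines, which at this level only means passing ReplicationType.RATIS and a higher ReplicationFactor:

    import java.nio.charset.StandardCharsets;
    import java.util.HashMap;
    import org.apache.hadoop.hdds.client.ReplicationFactor;
    import org.apache.hadoop.hdds.client.ReplicationType;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.OzoneClient;
    import org.apache.hadoop.ozone.client.OzoneClientFactory;
    import org.apache.hadoop.ozone.client.OzoneVolume;
    import org.apache.hadoop.ozone.client.io.OzoneInputStream;
    import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

    public final class KeyRoundTripOverRpc {
      public static void main(String[] args) throws Exception {
        byte[] data = "sample".getBytes(StandardCharsets.UTF_8);
        try (OzoneClient client =
            OzoneClientFactory.getRpcClient(new OzoneConfiguration())) {
          ObjectStore store = client.getObjectStore();
          store.createVolume("vol1");
          OzoneVolume vol = store.getVolume("vol1");
          vol.createBucket("bucket1");
          OzoneBucket bucket = vol.getBucket("bucket1");
          try (OzoneOutputStream out = bucket.createKey("key1", data.length,
              ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
              new HashMap<>())) {
            out.write(data);
          }
          byte[] back = new byte[data.length];
          try (OzoneInputStream in = bucket.readKey("key1")) {
            int off = 0;
            // Loop: a single read() may return fewer bytes than requested.
            while (off < back.length) {
              int n = in.read(back, off, back.length - off);
              if (n < 0) {
                break;
              }
              off += n;
            }
          }
        }
      }
    }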
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.client; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.RatisTestHelper; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -import static org.apache.hadoop.ozone.web.client - .TestKeys.PutHelper; -import static org.apache.hadoop.ozone.web.client - .TestKeys.getMultiPartKey; -import static org.apache.hadoop.ozone.web.client - .TestKeys.runTestGetKeyInfo; -import static org.apache.hadoop.ozone.web.client - .TestKeys.runTestPutAndDeleteKey; -import static org.apache.hadoop.ozone.web.client - .TestKeys.runTestPutAndGetKey; -import static org.apache.hadoop.ozone.web.client - .TestKeys.runTestPutAndGetKeyWithDnRestart; -import static org.apache.hadoop.ozone.web.client - .TestKeys.runTestPutAndListKey; -import static org.apache.hadoop.ozone.web.client - .TestKeys.runTestPutKey; - -/** The same as {@link TestKeys} except that this test is Ratis enabled. */ -public class TestKeysRatis { - @Rule - public Timeout testTimeout = new Timeout(300000); - private static RatisTestHelper.RatisTestSuite suite; - private static MiniOzoneCluster ozoneCluster = null; - static private String path; - private static ClientProtocol client = null; - - @BeforeClass - public static void init() throws Exception { - suite = new RatisTestHelper.RatisTestSuite(); - path = GenericTestUtils.getTempPath(TestKeysRatis.class.getSimpleName()); - ozoneCluster = suite.getCluster(); - ozoneCluster.waitForClusterToBeReady(); - client = suite.newOzoneClient(); - } - - /** - * shutdown MiniDFSCluster. 
- */ - @AfterClass - public static void shutdown() { - if (suite != null) { - suite.close(); - } - } - - - @Test - public void testPutKey() throws Exception { - runTestPutKey(new PutHelper(client, path)); - String delimiter = RandomStringUtils.randomAlphanumeric(1); - runTestPutKey(new PutHelper(client, path, - getMultiPartKey(delimiter))); - } - - @Test - public void testPutAndGetKeyWithDnRestart() throws Exception { - runTestPutAndGetKeyWithDnRestart( - new PutHelper(client, path), ozoneCluster); - String delimiter = RandomStringUtils.randomAlphanumeric(1); - runTestPutAndGetKeyWithDnRestart( - new PutHelper(client, path, getMultiPartKey(delimiter)), - ozoneCluster); - } - - @Test - public void testPutAndGetKey() throws Exception { - runTestPutAndGetKey(new PutHelper(client, path)); - String delimiter = RandomStringUtils.randomAlphanumeric(1); - runTestPutAndGetKey(new PutHelper(client, path, - getMultiPartKey(delimiter))); - } - - @Test - public void testPutAndDeleteKey() throws Exception { - runTestPutAndDeleteKey(new PutHelper(client, path)); - String delimiter = RandomStringUtils.randomAlphanumeric(1); - runTestPutAndDeleteKey(new PutHelper(client, path, - getMultiPartKey(delimiter))); - } - - @Test - public void testPutAndListKey() throws Exception { - runTestPutAndListKey(new PutHelper(client, path)); - String delimiter = RandomStringUtils.randomAlphanumeric(1); - runTestPutAndListKey(new PutHelper(client, path, - getMultiPartKey(delimiter))); - } - - @Test - public void testGetKeyInfo() throws Exception { - runTestGetKeyInfo(new PutHelper(client, path)); - String delimiter = RandomStringUtils.randomAlphanumeric(1); - runTestGetKeyInfo(new PutHelper(client, path, - getMultiPartKey(delimiter))); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java deleted file mode 100644 index f8f57d70f81..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java +++ /dev/null @@ -1,304 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.client; - -import io.netty.bootstrap.Bootstrap; -import io.netty.channel.Channel; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInitializer; -import io.netty.channel.ChannelOption; -import io.netty.channel.ChannelPipeline; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.SimpleChannelInboundHandler; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.socket.SocketChannel; -import io.netty.channel.socket.nio.NioSocketChannel; -import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.HttpClientCodec; -import io.netty.handler.codec.http.HttpContent; -import io.netty.handler.codec.http.HttpContentDecompressor; -import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.HttpObject; -import io.netty.handler.codec.http.HttpResponse; -import io.netty.handler.codec.http.HttpResponseStatus; -import io.netty.handler.codec.http.HttpVersion; -import io.netty.handler.codec.http.LastHttpContent; -import io.netty.handler.logging.LogLevel; -import io.netty.handler.logging.LoggingHandler; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.rest.headers.Header; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.Time; -import org.apache.http.HttpEntity; -import org.apache.http.client.methods.CloseableHttpResponse; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.utils.URIBuilder; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClients; -import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; -import org.apache.http.util.EntityUtils; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import javax.ws.rs.core.HttpHeaders; -import java.io.IOException; -import java.net.HttpURLConnection; -import java.net.URI; -import java.net.URISyntaxException; -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.Locale; -import java.util.UUID; - -import static io.netty.util.CharsetUtil.UTF_8; - -/** - * Unit tests for Ozone client connection reuse with Apache HttpClient and Netty - * based HttpClient. 
- */ -public class TestOzoneClient { - private static Logger log = Logger.getLogger(TestOzoneClient.class); - private static int testVolumeCount = 5; - private static MiniOzoneCluster cluster = null; - private static String endpoint = null; - - @BeforeClass - public static void init() throws Exception { - Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.ALL); - OzoneConfiguration conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - int port = cluster.getHddsDatanodes().get(0) - .getDatanodeDetails() - .getPort(DatanodeDetails.Port.Name.REST).getValue(); - endpoint = String.format("http://localhost:%d", port); - } - - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test(timeout = 5000) - public void testNewConnectionPerRequest() - throws IOException, URISyntaxException { - for (int i = 0; i < testVolumeCount; i++) { - try (CloseableHttpClient httpClient = - HttpClients.createDefault()) { - createVolume(getRandomVolumeName(i), httpClient); - } - } - } - - /** - * Object handler should be able to serve multiple requests from - * a single http client. This allows the client side to reuse - * http connections in a connection pool instead of creating a new - * connection per request which consumes resource heavily. - * - */ - @Test(timeout = 5000) - public void testReuseWithApacheHttpClient() - throws IOException, URISyntaxException { - - PoolingHttpClientConnectionManager cm = - new PoolingHttpClientConnectionManager(); - cm.setMaxTotal(200); - cm.setDefaultMaxPerRoute(20); - - try (CloseableHttpClient httpClient = - HttpClients.custom().setConnectionManager(cm).build()) { - for (int i = 0; i < testVolumeCount; i++) { - createVolume(getRandomVolumeName(i), httpClient); - } - } - } - - @Test(timeout = 10000) - public void testReuseWithNettyHttpClient() - throws IOException, InterruptedException, URISyntaxException { - URI uri = new URI(endpoint); - String host = uri.getHost() == null? "127.0.0.1" : uri.getHost(); - int port = uri.getPort(); - - EventLoopGroup workerGroup = new NioEventLoopGroup(); - try { - Bootstrap b = new Bootstrap(); - b.group(workerGroup) - .channel(NioSocketChannel.class) - .option(ChannelOption.SO_KEEPALIVE, true) - .option(ChannelOption.SO_REUSEADDR, true) - .handler(new ChannelInitializer() { - /** - * This method will be called once the {@link Channel} was - * registered. After the method returns this instance - * will be removed from the {@link ChannelPipeline} - * of the {@link Channel}. - * - * @param ch the {@link Channel} which was registered. - * @throws Exception is thrown if an error occurs. - * In that case the {@link Channel} will be closed. - */ - @Override - public void initChannel(SocketChannel ch) { - ChannelPipeline p = ch.pipeline(); - - // Comment the following line if you don't want client http trace - p.addLast("log", new LoggingHandler(LogLevel.INFO)); - p.addLast(new HttpClientCodec()); - p.addLast(new HttpContentDecompressor()); - p.addLast(new NettyHttpClientHandler()); - } - }); - - Channel ch = b.connect(host, port).sync().channel(); - for (int i = 0; i < testVolumeCount; i++) { - String volumeName = getRandomVolumeName(i); - try { - sendNettyCreateVolumeRequest(ch, volumeName); - Thread.sleep(1000); - } catch (Exception e) { - e.printStackTrace(); - } - } - - Thread.sleep(1000); - ch.close(); - // Wait for the server to close the connection. 
- ch.closeFuture().sync(); - } catch (Exception ex) { - log.error("Error received in client setup", ex); - }finally { - workerGroup.shutdownGracefully(); - } - } - - class NettyHttpClientHandler extends - SimpleChannelInboundHandler { - - @Override - public void channelRead0(ChannelHandlerContext ctx, HttpObject msg) { - if (msg instanceof HttpResponse) { - HttpResponse response = (HttpResponse) msg; - log.info("STATUS: " + response.getStatus()); - log.info("VERSION: " + response.getProtocolVersion()); - Assert.assertEquals(HttpResponseStatus.CREATED.code(), - response.getStatus().code()); - } - if (msg instanceof HttpContent) { - HttpContent content = (HttpContent) msg; - log.info(content.content().toString(UTF_8)); - if (content instanceof LastHttpContent) { - log.info("END OF CONTENT"); - } - } - } - - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { - log.error("Exception upon channel read", cause); - ctx.close(); - } - } - - private String getRandomVolumeName(int index) { - UUID id = UUID.randomUUID(); - return "test-volume-" + index + "-" + id; - - } - - // Prepare the HTTP request and send it over the netty channel. - private void sendNettyCreateVolumeRequest(Channel channel, String volumeName) - throws URISyntaxException, IOException { - URIBuilder builder = new URIBuilder(endpoint); - builder.setPath("/" + volumeName); - URI uri = builder.build(); - - String host = uri.getHost() == null ? "127.0.0.1" : uri.getHost(); - FullHttpRequest request = new DefaultFullHttpRequest( - HttpVersion.HTTP_1_1, HttpMethod.POST, uri.getRawPath()); - - SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US); - request.headers().set(HttpHeaders.HOST, host); - request.headers().add(HttpHeaders.CONTENT_TYPE, "application/json"); - request.headers().set(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - request.headers().set(HttpHeaders.DATE, - format.format(new Date(Time.monotonicNow()))); - request.headers().set(Header.OZONE_USER, - UserGroupInformation.getCurrentUser().getUserName()); - request.headers().set(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " - + OzoneConsts.OZONE_SIMPLE_HDFS_USER); - - // Send the HTTP request via netty channel. - channel.writeAndFlush(request); - } - - // It is caller's responsibility to close the client. 
- private void createVolume(String volumeName, CloseableHttpClient httpClient) - throws IOException, URISyntaxException { - HttpPost create1 = - getCreateVolumeRequest(volumeName); - HttpEntity entity = null; - try { - CloseableHttpResponse response1 = - httpClient.execute(create1); - Assert.assertEquals(HttpURLConnection.HTTP_CREATED, - response1.getStatusLine().getStatusCode()); - entity = response1.getEntity(); - } catch (IOException e) { - e.printStackTrace(); - } finally { - EntityUtils.consumeQuietly(entity); - } - } - - private HttpPost getCreateVolumeRequest(String volumeName) - throws URISyntaxException, IOException { - URIBuilder builder = new URIBuilder(endpoint); - builder.setPath("/" + volumeName); - HttpPost httpPost = new HttpPost(builder.build().toString()); - SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US); - httpPost.addHeader(Header.OZONE_VERSION_HEADER, - Header.OZONE_V1_VERSION_HEADER); - httpPost.addHeader(HttpHeaders.DATE, - format.format(new Date(Time.monotonicNow()))); - httpPost.addHeader(Header.OZONE_USER, - UserGroupInformation.getCurrentUser().getUserName()); - httpPost.addHeader(HttpHeaders.AUTHORIZATION, - Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " - + OzoneConsts.OZONE_SIMPLE_HDFS_USER); - return httpPost; - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java deleted file mode 100644 index 3b3de4c3249..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java +++ /dev/null @@ -1,381 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
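TestOzoneClient existed to prove HTTP connection reuse against the REST endpoint; with that endpoint removed there is no per-request connection to pool. The analogous discipline on the RPC side is holding one client open for many calls. A minimal sketch, under the same assumptions and placeholder names as above:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.client.OzoneClient;
    import org.apache.hadoop.ozone.client.OzoneClientFactory;

    public final class ReuseOneRpcClient {
      public static void main(String[] args) throws Exception {
        try (OzoneClient client =
            OzoneClientFactory.getRpcClient(new OzoneConfiguration())) {
          ObjectStore store = client.getObjectStore();
          // One client, many requests: no Apache or Netty pooling layer needed.
          for (int i = 0; i < 5; i++) {
            store.createVolume("test-volume-" + i);
          }
        }
      }
    }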
- */ - -package org.apache.hadoop.ozone.web.client; - -import java.io.File; -import java.io.IOException; -import java.text.ParseException; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; - -import org.apache.hadoop.hdds.client.OzoneQuota; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneTestUtils; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.VolumeArgs; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.client.rest.RestClient; -import org.apache.hadoop.ozone.client.rpc.RpcClient; -import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.Time; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.junit.AfterClass; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -/** - * Test Ozone Volumes Lifecycle. - */ -@RunWith(value = Parameterized.class) -public class TestVolume { - private static MiniOzoneCluster cluster = null; - private static ClientProtocol client = null; - private static OzoneConfiguration conf; - - @Parameterized.Parameters - public static Collection clientProtocol() { - Object[][] params = new Object[][] { - {RpcClient.class}}; - return Arrays.asList(params); - } - - @SuppressWarnings("visibilitymodifier") - @Parameterized.Parameter - public Class clientProtocol; - - /** - * Create a MiniDFSCluster for testing. - *
<p>
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - - String path = GenericTestUtils - .getTempPath(TestVolume.class.getSimpleName()); - FileUtils.deleteDirectory(new File(path)); - - Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG); - - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - } - - @Before - public void setup() throws Exception { - if (clientProtocol.equals(RestClient.class)) { - client = new RestClient(conf); - } else { - client = new RpcClient(conf); - } - } - - /** - * shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testCreateVolume() throws Exception { - runTestCreateVolume(client); - } - - static void runTestCreateVolume(ClientProtocol clientProtocol) - throws OzoneException, IOException, ParseException { - String volumeName = OzoneUtils.getRequestID().toLowerCase(); - - long currentTime = Time.now(); - - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .setOwner("bilbo") - .setQuota("100TB") - .setAdmin("hdfs") - .build(); - clientProtocol.createVolume(volumeName, volumeArgs); - OzoneVolume vol = clientProtocol.getVolumeDetails(volumeName); - - assertEquals(vol.getName(), volumeName); - assertEquals(vol.getAdmin(), "hdfs"); - assertEquals(vol.getOwner(), "bilbo"); - assertEquals(vol.getQuota(), OzoneQuota.parseQuota("100TB").sizeInBytes()); - - // verify the volume creation time - assertTrue((vol.getCreationTime() - / 1000) >= (currentTime / 1000)); - - // Test creating a volume with an invalid volume name. We do not use a - // Rule here because the test method is static.
-    try {
-      String invalidVolumeName = "#" + OzoneUtils.getRequestID().toLowerCase();
-      clientProtocol.createVolume(invalidVolumeName);
-      /*
-      //TODO: RestClient and RpcClient should use HddsClientUtils to verify name
-      fail("Expected the volume creation to fail because the"
-          + " volume name starts with an invalid char #");*/
-    } catch (Exception e) {
-      assertTrue(e.getMessage().contains("Bucket or Volume name"
-          + " has an unsupported character : #"));
-    }
-  }
-
-  @Test
-  public void testCreateDuplicateVolume() throws Exception {
-    runTestCreateDuplicateVolume(client);
-  }
-
-  static void runTestCreateDuplicateVolume(ClientProtocol clientProtocol)
-      throws Exception {
-
-    clientProtocol.createVolume("testvol");
-    OzoneTestUtils.expectOmException(ResultCodes.VOLUME_ALREADY_EXISTS,
-        () -> clientProtocol.createVolume("testvol"));
-  }
-
-  @Test
-  public void testDeleteVolume() throws OzoneException, IOException {
-    runTestDeleteVolume(client);
-  }
-
-  static void runTestDeleteVolume(ClientProtocol clientProtocol)
-      throws OzoneException, IOException {
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    clientProtocol.createVolume(volumeName);
-    clientProtocol.deleteVolume(volumeName);
-  }
-
-  @Test
-  public void testChangeOwnerOnVolume() throws Exception {
-    runTestChangeOwnerOnVolume(client);
-  }
-
-  static void runTestChangeOwnerOnVolume(ClientProtocol clientProtocol)
-      throws OzoneException, ParseException, IOException {
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    clientProtocol.createVolume(volumeName);
-    clientProtocol.getVolumeDetails(volumeName);
-    clientProtocol.setVolumeOwner(volumeName, "frodo");
-    OzoneVolume newVol = clientProtocol.getVolumeDetails(volumeName);
-    assertEquals(newVol.getOwner(), "frodo");
-    // verify that the creation time is not lost after the set-owner operation
-    assertTrue(newVol.getCreationTime() > 0);
-  }
-
-  @Test
-  public void testChangeQuotaOnVolume() throws Exception {
-    runTestChangeQuotaOnVolume(client);
-  }
-
-  static void runTestChangeQuotaOnVolume(ClientProtocol clientProtocol)
-      throws OzoneException, IOException, ParseException {
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    clientProtocol.createVolume(volumeName);
-    clientProtocol.setVolumeQuota(volumeName, OzoneQuota.parseQuota("1000MB"));
-    OzoneVolume newVol = clientProtocol.getVolumeDetails(volumeName);
-    assertEquals(newVol.getQuota(),
-        OzoneQuota.parseQuota("1000MB").sizeInBytes());
-    // verify that the creation time is not lost after the set-quota operation
-    assertTrue(newVol.getCreationTime() > 0);
-  }
-
-  // Listing all volumes in the cluster has to be fixed after HDDS-357.
-  // TODO: fix this
-  @Ignore
-  @Test
-  public void testListVolume() throws OzoneException, IOException {
-    runTestListVolume(client);
-  }
-
-  static void runTestListVolume(ClientProtocol clientProtocol)
-      throws OzoneException, IOException {
-    for (int x = 0; x < 10; x++) {
-      String volumeName = OzoneUtils.getRequestID().toLowerCase();
-      clientProtocol.createVolume(volumeName);
-    }
-
-    List<OzoneVolume> ovols = clientProtocol.listVolumes(
-        UserGroupInformation.getCurrentUser().getUserName(), null, null, 100);
-    assertTrue(ovols.size() >= 10);
-  }
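
The duplicate-volume test above leans on OzoneTestUtils.expectOmException to assert that the second create fails with VOLUME_ALREADY_EXISTS. A minimal sketch of how such a helper can be written, assuming Hadoop's LambdaTestUtils.VoidCallable functional interface (this is an illustration, not necessarily the project's actual implementation):

import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
import org.apache.hadoop.test.LambdaTestUtils.VoidCallable;
import org.junit.Assert;

/** Runs eval and asserts it throws an OMException with the expected code. */
public static void expectOmException(ResultCodes code, VoidCallable eval)
    throws Exception {
  try {
    eval.call();
    Assert.fail("OMException is expected");
  } catch (OMException ex) {
    // The result code, not the message, is the stable part of the contract.
    Assert.assertEquals(code, ex.getResult());
  }
}
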
- @Ignore("Takes 3m to run, disable for now.") - @Test - public void testListVolumePagination() throws OzoneException, IOException { - runTestListVolumePagination(client); - } - - static void runTestListVolumePagination(ClientProtocol clientProtocol) - throws OzoneException, IOException { - final int volCount = 2000; - final int step = 100; - for (int x = 0; x < volCount; x++) { - String volumeName = OzoneUtils.getRequestID().toLowerCase(); - clientProtocol.createVolume(volumeName); - } - String prevKey = null; - int count = 0; - int pagecount = 0; - while (count < volCount) { - List ovols = clientProtocol.listVolumes(null, prevKey, step); - count += ovols.size(); - prevKey = ovols.get(ovols.size() - 1).getName(); - pagecount++; - } - assertEquals(volCount / step, pagecount); - } - - // TODO: remove @Ignore below once the problem has been resolved. - @Ignore - @Test - public void testListAllVolumes() throws OzoneException, IOException { - runTestListAllVolumes(client); - } - - static void runTestListAllVolumes(ClientProtocol clientProtocol) - throws OzoneException, IOException { - final int volCount = 200; - final int step = 10; - for (int x = 0; x < volCount; x++) { - String userName = - "frodo" + RandomStringUtils.randomAlphabetic(5).toLowerCase(); - String volumeName = - "vol" + RandomStringUtils.randomAlphabetic(5).toLowerCase(); - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .setOwner(userName) - .setQuota("100TB") - .setAdmin("hdfs") - .build(); - clientProtocol.createVolume(volumeName, volumeArgs); - OzoneVolume vol = clientProtocol.getVolumeDetails(volumeName); - assertNotNull(vol); - } - String prevKey = null; - int count = 0; - int pagecount = 0; - while (count < volCount) { - List ovols = clientProtocol.listVolumes(null, prevKey, step); - count += ovols.size(); - if (ovols.size() > 0) { - prevKey = ovols.get(ovols.size() - 1).getName(); - } - pagecount++; - } - // becasue we are querying an existing ozone store, there will - // be volumes created by other tests too. So we should get more page counts. - assertEquals(volCount / step, pagecount); - } - - // Listing all volumes in the cluster feature has to be fixed after HDDS-357. - // TODO: fix this - @Ignore - @Test - public void testListVolumes() throws Exception { - runTestListVolumes(client); - } - - static void runTestListVolumes(ClientProtocol clientProtocol) - throws OzoneException, IOException, ParseException { - final int volCount = 20; - final String user1 = "test-user-a"; - final String user2 = "test-user-b"; - - long currentTime = Time.now(); - // Create 20 volumes, 10 for user1 and another 10 for user2. 
-    for (int x = 0; x < volCount; x++) {
-      String volumeName;
-      String userName;
-
-      if (x % 2 == 0) {
-        // create volume [test-vol0, test-vol2, ..., test-vol18] for user1
-        userName = user1;
-        volumeName = "test-vol" + x;
-      } else {
-        // create volume [test-vol1, test-vol3, ..., test-vol19] for user2
-        userName = user2;
-        volumeName = "test-vol" + x;
-      }
-      VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-          .setOwner(userName)
-          .setQuota("100TB")
-          .setAdmin("hdfs")
-          .build();
-      clientProtocol.createVolume(volumeName, volumeArgs);
-      OzoneVolume vol = clientProtocol.getVolumeDetails(volumeName);
-      assertNotNull(vol);
-    }
-
-    // list all the volumes belonging to user1
-    List<OzoneVolume> volumeList =
-        clientProtocol.listVolumes(user1, null, null, 100);
-    assertEquals(10, volumeList.size());
-    // verify the owner name and creation time of each volume
-    for (OzoneVolume vol : volumeList) {
-      assertTrue(vol.getOwner().equals(user1));
-      assertTrue((vol.getCreationTime()
-          / 1000) >= (currentTime / 1000));
-    }
-
-    // test the max-keys parameter of listing volumes
-    volumeList = clientProtocol.listVolumes(user1, null, null, 2);
-    assertEquals(2, volumeList.size());
-
-    // test the prefix parameter of listing volumes
-    volumeList = clientProtocol.listVolumes(user1, "test-vol10", null, 10);
-    assertTrue(volumeList.size() == 1
-        && volumeList.get(0).getName().equals("test-vol10"));
-
-    volumeList = clientProtocol.listVolumes(user1, "test-vol1", null, 10);
-    assertEquals(5, volumeList.size());
-
-    // test the start-key parameter of listing volumes
-    volumeList = clientProtocol.listVolumes(user2, null, "test-vol15", 10);
-    assertEquals(2, volumeList.size());
-
-    String volumeName;
-    for (int x = 0; x < volCount; x++) {
-      volumeName = "test-vol" + x;
-      clientProtocol.deleteVolume(volumeName);
-    }
-  }
-}
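
The ignored pagination tests above encode the paging contract of ClientProtocol.listVolumes: the last volume name of the previous page is passed back as the (exclusive) start key, and the page size is the max-keys argument. A minimal client-side sketch built on that contract, using the three-argument overload (prefix, prevKey, maxKeys) seen in the test code; the empty-page stop condition is an assumption:

// Page through all volumes, 100 at a time.
String prevKey = null;
while (true) {
  List<OzoneVolume> page = client.listVolumes(null, prevKey, 100);
  if (page.isEmpty()) {
    break;  // an empty page means the listing is exhausted
  }
  for (OzoneVolume vol : page) {
    System.out.println(vol.getName());
  }
  // Hand the last name back as the next exclusive start key.
  prevKey = page.get(page.size() - 1).getName();
}
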
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
deleted file mode 100644
index 27074a302a0..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.client;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.RestClient;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.junit.*;
-import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collection;
-
-/** The same as {@link TestVolume} except that this test is Ratis enabled. */
-@Ignore("Disabling Ratis tests for pipeline work.")
-@RunWith(value = Parameterized.class)
-public class TestVolumeRatis {
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-  private static ClientProtocol client;
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> clientProtocol() {
-    Object[][] params = new Object[][] {
-        {RpcClient.class},
-        {RestClient.class}};
-    return Arrays.asList(params);
-  }
-
-  @Parameterized.Parameter
-  @SuppressWarnings("visibilitymodifier")
-  public Class clientProtocol;
-
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-
-    // This enables Ratis in the cluster.
-    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true);
-
-    String path = GenericTestUtils
-        .getTempPath(TestVolume.class.getSimpleName());
-    FileUtils.deleteDirectory(new File(path));
-
-    Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
-
-    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
-    cluster.waitForClusterToBeReady();
-    final int port = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails()
-        .getPort(DatanodeDetails.Port.Name.REST).getValue();
-  }
-
-  @Before
-  public void setup() throws Exception {
-    if (clientProtocol.equals(RestClient.class)) {
-      client = new RestClient(conf);
-    } else {
-      client = new RpcClient(conf);
-    }
-  }
-
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testCreateVolume() throws Exception {
-    TestVolume.runTestCreateVolume(client);
-  }
-
-  @Test
-  public void testCreateDuplicateVolume() throws Exception {
-    TestVolume.runTestCreateDuplicateVolume(client);
-  }
-
-  @Test
-  public void testDeleteVolume() throws OzoneException, IOException {
-    TestVolume.runTestDeleteVolume(client);
-  }
-
-  @Test
-  public void testChangeOwnerOnVolume() throws Exception {
-    TestVolume.runTestChangeOwnerOnVolume(client);
-  }
-
-  @Test
-  public void testChangeQuotaOnVolume() throws Exception {
-    TestVolume.runTestChangeQuotaOnVolume(client);
-  }
-
-  // TODO: remove @Ignore below once the problem has been resolved.
-  @Ignore("listVolumes not implemented in DistributedStorageHandler")
-  @Test
-  public void testListVolume() throws OzoneException, IOException {
-    TestVolume.runTestListVolume(client);
-  }
-
-  // TODO: remove @Ignore below once the problem has been resolved.
- @Ignore("See TestVolume.testListVolumePagination()") - @Test - public void testListVolumePagination() throws OzoneException, IOException { - TestVolume.runTestListVolumePagination(client); - } - - // TODO: remove @Ignore below once the problem has been resolved. - @Ignore("See TestVolume.testListAllVolumes()") - @Test - public void testListAllVolumes() throws Exception { - TestVolume.runTestListAllVolumes(client); - } - - @Ignore("Disabling Ratis tests for pipeline work.") - @Test - public void testListVolumes() throws Exception { - TestVolume.runTestListVolumes(client); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/package-info.java deleted file mode 100644 index 91a013c8452..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * REST client tests. - */ -package org.apache.hadoop.ozone.web.client; \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/package-info.java deleted file mode 100644 index fba9a394615..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Rest Client Tests.
- */
-package org.apache.hadoop.ozone.web;
\ No newline at end of file
diff --git a/hadoop-ozone/objectstore-service/pom.xml b/hadoop-ozone/objectstore-service/pom.xml
deleted file mode 100644
index 47d7e3782a8..00000000000
--- a/hadoop-ozone/objectstore-service/pom.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-         http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-objectstore-service</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone Object Store REST Service</description>
-  <name>Apache Hadoop Ozone Object Store REST Service</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-common</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-client</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>provided</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs-client</artifactId>
-      <scope>provided</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>com.squareup.okhttp</groupId>
-          <artifactId>okhttp</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>io.swagger</groupId>
-      <artifactId>swagger-annotations</artifactId>
-      <version>1.5.9</version>
-      <scope>provided</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <version>2.2.0</version>
-      <scope>test</scope>
-    </dependency>
-
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-shade-plugin</artifactId>
-        <version>3.1.1</version>
-        <configuration>
-          <shadedArtifactAttached>true</shadedArtifactAttached>
-          <shadedClassifierName>plugin</shadedClassifierName>
-          <artifactSet>
-            <includes>
-              <include>*:*:*:*</include>
-            </includes>
-          </artifactSet>
-          <filters>
-            <filter>
-              <artifact>*:*</artifact>
-              <excludes>
-                <exclude>META-INF/*.SF</exclude>
-                <exclude>META-INF/*.DSA</exclude>
-                <exclude>META-INF/*.RSA</exclude>
-              </excludes>
-            </filter>
-          </filters>
-        </configuration>
-        <executions>
-          <execution>
-            <phase>package</phase>
-            <goals>
-              <goal>shade</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
deleted file mode 100644
index 824eb402fae..00000000000
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdfs.server.datanode; - -import java.io.Closeable; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.HashMap; -import java.util.Map; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; -import org.apache.hadoop.ozone.web.ObjectStoreApplication; -import org.apache.hadoop.ozone.web.handlers.ServiceFilter; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainer; -import org.apache.hadoop.ozone.web.storage.DistributedStorageHandler; -import org.apache.hadoop.security.UserGroupInformation; - -import com.sun.jersey.api.container.ContainerFactory; -import com.sun.jersey.api.core.ApplicationAdapter; -import static com.sun.jersey.api.core.ResourceConfig.FEATURE_TRACE; -import static com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS; -import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; -import static org.apache.hadoop.ozone.OmUtils.getOmAddress; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRACE_ENABLED_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY; -import org.apache.ratis.protocol.ClientId; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Implements object store handling within the DataNode process. This class is - * responsible for initializing and maintaining the RPC clients and servers and - * the web application required for the object store implementation. - */ -public final class ObjectStoreHandler implements Closeable { - - private static final Logger LOG = - LoggerFactory.getLogger(ObjectStoreHandler.class); - - private final ObjectStoreJerseyContainer objectStoreJerseyContainer; - private final OzoneManagerProtocol ozoneManagerClient; - private final StorageContainerLocationProtocol - storageContainerLocationClient; - - private final StorageHandler storageHandler; - private ClientId clientId = ClientId.randomId(); - - /** - * Creates a new ObjectStoreHandler. - * - * @param conf configuration - * @throws IOException if there is an I/O error - */ - public ObjectStoreHandler(Configuration conf) throws IOException { - boolean ozoneTrace = conf.getBoolean(OZONE_TRACE_ENABLED_KEY, - OZONE_TRACE_ENABLED_DEFAULT); - - // Initialize Jersey container for object store web application. 
- RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class, - ProtobufRpcEngine.class); - long scmVersion = - RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class); - - InetSocketAddress scmAddress = - getScmAddressForClients(conf); - this.storageContainerLocationClient = - TracingUtil.createProxy( - new StorageContainerLocationProtocolClientSideTranslatorPB( - RPC.getProxy(StorageContainerLocationProtocolPB.class, - scmVersion, - scmAddress, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf))), - StorageContainerLocationProtocol.class, conf); - - RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class, - ProtobufRpcEngine.class); - long omVersion = - RPC.getProtocolVersion(OzoneManagerProtocolPB.class); - InetSocketAddress omAddress = getOmAddress(conf); - this.ozoneManagerClient = - TracingUtil.createProxy( - new OzoneManagerProtocolClientSideTranslatorPB( - RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, - omAddress, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf)), clientId.toString()), - OzoneManagerProtocol.class, conf); - - storageHandler = new DistributedStorageHandler( - new OzoneConfiguration(conf), - TracingUtil.createProxy(storageContainerLocationClient, - StorageContainerLocationProtocol.class, conf), - this.ozoneManagerClient); - ApplicationAdapter aa = - new ApplicationAdapter(new ObjectStoreApplication()); - Map settingsMap = new HashMap<>(); - settingsMap.put(PROPERTY_CONTAINER_REQUEST_FILTERS, - ServiceFilter.class.getCanonicalName()); - settingsMap.put(FEATURE_TRACE, ozoneTrace); - aa.setPropertiesAndFeatures(settingsMap); - this.objectStoreJerseyContainer = ContainerFactory.createContainer( - ObjectStoreJerseyContainer.class, aa); - this.objectStoreJerseyContainer.setStorageHandler(storageHandler); - } - - /** - * Returns the initialized web application container. - * - * @return initialized web application container - */ - public ObjectStoreJerseyContainer getObjectStoreJerseyContainer() { - return this.objectStoreJerseyContainer; - } - - /** - * Returns the storage handler. - * - * @return returns the storage handler - */ - public StorageHandler getStorageHandler() { - return this.storageHandler; - } - - @Override - public void close() { - LOG.info("Closing ObjectStoreHandler."); - storageHandler.close(); - IOUtils.cleanupWithLogger(LOG, storageContainerLocationClient); - IOUtils.cleanupWithLogger(LOG, ozoneManagerClient); - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/package-info.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/package-info.java deleted file mode 100644 index e853f6743dd..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.server.datanode; - -/** - * Object store related service inside the datanode. - */ \ No newline at end of file diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/OzoneRestUtils.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/OzoneRestUtils.java deleted file mode 100644 index b13ae30e6c4..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/OzoneRestUtils.java +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Request; -import javax.ws.rs.core.Response; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.List; -import java.util.Locale; -import java.util.TimeZone; -import java.util.UUID; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.ozone.client.io.LengthInputStream; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.client.rest.headers.Header; -import org.apache.hadoop.ozone.web.exceptions.ErrorTable; -import org.apache.hadoop.ozone.web.handlers.UserArgs; -import org.apache.hadoop.util.Time; - -import com.google.common.base.Preconditions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Set of Utility functions used in ozone. - */ -@InterfaceAudience.Private -public final class OzoneRestUtils { - - private static final Logger LOG = LoggerFactory.getLogger( - OzoneRestUtils.class); - - - private OzoneRestUtils() { - // Never constructed - } - - /** - * Date format that used in ozone. Here the format is thread safe to use. - */ - private static final ThreadLocal DATE_FORMAT = - new ThreadLocal() { - @Override - protected SimpleDateFormat initialValue() { - SimpleDateFormat format = new SimpleDateFormat( - OzoneConsts.OZONE_DATE_FORMAT, Locale.US); - format.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE)); - - return format; - } - }; - - /** - * verifies that bucket name / volume name is a valid DNS name. 
- * - * @param resName Bucket or volume Name to be validated - * - * @throws IllegalArgumentException - */ - public static void verifyResourceName(String resName) - throws IllegalArgumentException { - HddsClientUtils.verifyResourceName(resName); - } - - /** - * Returns a random Request ID. - * - * Request ID is returned to the client as well as flows through the system - * facilitating debugging on why a certain request failed. - * - * @return String random request ID - */ - public static String getRequestID() { - return UUID.randomUUID().toString(); - } - - - - /** - * Basic validate routine to make sure that all the - * required headers are in place. - * - * @param request - http request - * @param headers - http headers - * @param reqId - request id - * @param resource - Resource Name - * @param hostname - Hostname - * - * @throws OzoneException - */ - public static void validate(Request request, HttpHeaders headers, - String reqId, String resource, String hostname) - throws OzoneException { - - List ozHeader = - headers.getRequestHeader(Header.OZONE_VERSION_HEADER); - if (ozHeader == null) { - throw ErrorTable - .newError(ErrorTable.MISSING_VERSION, reqId, resource, hostname); - } - - List date = headers.getRequestHeader(HttpHeaders.DATE); - if (date == null) { - throw ErrorTable - .newError(ErrorTable.MISSING_DATE, reqId, resource, hostname); - } - - /* - TODO : - Ignore the results for time being. Eventually we can validate if the - request Date time is too skewed and reject if it is so. - */ - parseDate(date.get(0), reqId, resource, hostname); - - } - - /** - * Parses the Date String coming from the Users. - * - * @param dateString - Date String - * @param reqID - Ozone Request ID - * @param resource - Resource Name - * @param hostname - HostName - * - * @return - Date - * - * @throws OzoneException - in case of parsing error - */ - public static synchronized Date parseDate(String dateString, String reqID, - String resource, String hostname) - throws OzoneException { - try { - return DATE_FORMAT.get().parse(dateString); - } catch (ParseException ex) { - OzoneException exp = - ErrorTable.newError(ErrorTable.BAD_DATE, reqID, resource, hostname); - exp.setMessage(ex.getMessage()); - throw exp; - } - } - - /** - * Returns a response with appropriate OZONE headers and payload. - * - * @param args - UserArgs or Inherited class - * @param statusCode - HttpStatus code - * @param payload - Content Body - * - * @return JAX-RS Response - */ - public static Response getResponse(UserArgs args, int statusCode, - String payload) { - String date = DATE_FORMAT.get().format(new Date(Time.now())); - return Response.ok(payload) - .header(Header.OZONE_SERVER_NAME, args.getHostName()) - .header(Header.OZONE_REQUEST_ID, args.getRequestID()) - .header(HttpHeaders.DATE, date).status(statusCode).build(); - } - - /** - * Returns a response with appropriate OZONE headers and payload. 
- * - * @param args - UserArgs or Inherited class - * @param statusCode - HttpStatus code - * @param stream InputStream - * - * @return JAX-RS Response - */ - public static Response getResponse(UserArgs args, int statusCode, - LengthInputStream stream) { - String date = DATE_FORMAT.get().format(new Date(Time.now())); - return Response.ok(stream, MediaType.APPLICATION_OCTET_STREAM) - .header(Header.OZONE_SERVER_NAME, args.getHostName()) - .header(Header.OZONE_REQUEST_ID, args.getRequestID()) - .header(HttpHeaders.DATE, date).status(statusCode) - .header(HttpHeaders.CONTENT_LENGTH, stream.getLength()) - .build(); - - } - - - - /** - * Convert time in millisecond to a human readable format required in ozone. - * @return a human readable string for the input time - */ - public static String formatTime(long millis) { - return DATE_FORMAT.get().format(millis); - } - - /** - * Convert time in ozone date format to millisecond. - * @return time in milliseconds - */ - public static long formatDate(String date) throws ParseException { - Preconditions.checkNotNull(date, "Date string should not be null."); - return DATE_FORMAT.get().parse(date).getTime(); - } - -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/package-info.java deleted file mode 100644 index 973c8f36fb4..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -/** - * Ozone related generic classes. - */ \ No newline at end of file diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/ObjectStoreApplication.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/ObjectStoreApplication.java deleted file mode 100644 index 7dd9a339bde..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/ObjectStoreApplication.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web; - -import org.apache.hadoop.ozone.client.rest.OzoneExceptionMapper; -import org.apache.hadoop.ozone.web.handlers.BucketHandler; -import org.apache.hadoop.ozone.web.handlers.KeyHandler; -import org.apache.hadoop.ozone.web.handlers.ServiceFilter; -import org.apache.hadoop.ozone.web.handlers.VolumeHandler; -import org.apache.hadoop.ozone.web.messages.LengthInputStreamMessageBodyWriter; -import org.apache.hadoop.ozone.web.messages.StringMessageBodyWriter; - -import javax.ws.rs.core.Application; -import java.util.HashSet; -import java.util.Set; - -/** - * Ozone Application. - */ -public class ObjectStoreApplication extends Application { - public ObjectStoreApplication() { - super(); - } - - @Override - public Set> getClasses() { - HashSet> set = new HashSet<>(); - set.add(BucketHandler.class); - set.add(VolumeHandler.class); - set.add(KeyHandler.class); - set.add(OzoneExceptionMapper.class); - set.add(LengthInputStreamMessageBodyWriter.class); - set.add(StringMessageBodyWriter.class); - return set; - } - - @Override - public Set getSingletons() { - HashSet set = new HashSet<>(); - set.add(ServiceFilter.class); - return set; - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/OzoneHddsDatanodeService.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/OzoneHddsDatanodeService.java deleted file mode 100644 index 621b659711e..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/OzoneHddsDatanodeService.java +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.web; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.web.netty.ObjectStoreRestHttpServer; - -import org.apache.commons.io.IOUtils; -import org.apache.hadoop.util.ServicePlugin; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * DataNode service plugin implementation to start ObjectStore rest server. - */ -public class OzoneHddsDatanodeService implements ServicePlugin { - - private static final Logger LOG = - LoggerFactory.getLogger(OzoneHddsDatanodeService.class); - - private Configuration conf; - private ObjectStoreHandler handler; - private ObjectStoreRestHttpServer objectStoreRestHttpServer; - - @Override - public void start(Object service) { - if (service instanceof HddsDatanodeService) { - try { - HddsDatanodeService hddsDatanodeService = (HddsDatanodeService) service; - conf = hddsDatanodeService.getConf(); - handler = new ObjectStoreHandler(conf); - objectStoreRestHttpServer = new ObjectStoreRestHttpServer( - conf, null, handler); - objectStoreRestHttpServer.start(); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, - objectStoreRestHttpServer.getHttpAddress().getPort()); - hddsDatanodeService.getDatanodeDetails().setPort(restPort); - - } catch (IOException e) { - throw new RuntimeException("Can't start the Object Store Rest server", - e); - } - } else { - LOG.error("Not starting {}, as the plugin is not invoked through {}", - OzoneHddsDatanodeService.class.getSimpleName(), - HddsDatanodeService.class.getSimpleName()); - } - } - - - @Override - public void stop() { - try { - if (handler != null) { - handler.close(); - } - } catch (Exception e) { - throw new RuntimeException("Can't stop the Object Store Rest server", e); - } - } - - @Override - public void close() { - IOUtils.closeQuietly(objectStoreRestHttpServer); - IOUtils.closeQuietly(handler); - } - -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java deleted file mode 100644 index 16892e7db64..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.exceptions;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-
-import static java.net.HttpURLConnection.HTTP_BAD_REQUEST;
-import static java.net.HttpURLConnection.HTTP_CONFLICT;
-import static java.net.HttpURLConnection.HTTP_FORBIDDEN;
-import static java.net.HttpURLConnection.HTTP_INTERNAL_ERROR;
-import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
-import static java.net.HttpURLConnection.HTTP_UNAUTHORIZED;
-
-/**
- * ErrorTable represents the errors from the Ozone REST API layer.
- *
- * Please note: the errors in this table are sorted by their HTTP error
- * codes. If you add new error codes to this table, please follow the same
- * convention.
- */
-@InterfaceAudience.Private
-public final class ErrorTable {
-
-  /* Error 400 */
-  public static final OzoneException MISSING_VERSION =
-      new OzoneException(HTTP_BAD_REQUEST, "missingVersion",
-          "x-ozone-version header is required.");
-
-  public static final OzoneException MISSING_DATE =
-      new OzoneException(HTTP_BAD_REQUEST, "missingDate",
-          "Date header is required.");
-
-  public static final OzoneException BAD_DATE =
-      new OzoneException(HTTP_BAD_REQUEST, "badDate",
-          "Unable to parse date format.");
-
-  public static final OzoneException MALFORMED_QUOTA =
-      new OzoneException(HTTP_BAD_REQUEST, "malformedQuota",
-          "Invalid quota specified.");
-
-  public static final OzoneException MALFORMED_ACL =
-      new OzoneException(HTTP_BAD_REQUEST, "malformedACL",
-          "Invalid ACL specified.");
-
-  public static final OzoneException INVALID_VOLUME_NAME =
-      new OzoneException(HTTP_BAD_REQUEST, "invalidVolumeName",
-          "Invalid volume name.");
-
-  public static final OzoneException INVALID_QUERY_PARAM =
-      new OzoneException(HTTP_BAD_REQUEST, "invalidQueryParam",
-          "Invalid query parameter.");
-
-  public static final OzoneException INVALID_RESOURCE_NAME =
-      new OzoneException(HTTP_BAD_REQUEST, "invalidResourceName",
-          "Invalid volume, bucket or key name.");
-
-  public static final OzoneException INVALID_BUCKET_NAME =
-      new OzoneException(HTTP_BAD_REQUEST, "invalidBucketName",
-          "Invalid bucket name.");
-
-  public static final OzoneException INVALID_KEY =
-      new OzoneException(HTTP_BAD_REQUEST, "invalidKey", "Invalid key.");
-
-  public static final OzoneException INVALID_REQUEST =
-      new OzoneException(HTTP_BAD_REQUEST, "invalidRequest",
-          "Error in request.");
-
-  public static final OzoneException MALFORMED_BUCKET_VERSION =
-      new OzoneException(HTTP_BAD_REQUEST, "malformedBucketVersion",
-          "Malformed bucket version or version not unique.");
-
-  public static final OzoneException MALFORMED_STORAGE_TYPE =
-      new OzoneException(HTTP_BAD_REQUEST, "malformedStorageType",
-          "Invalid storage Type specified.");
-
-  public static final OzoneException MALFORMED_STORAGE_CLASS =
-      new OzoneException(HTTP_BAD_REQUEST, "malformedStorageClass",
-          "Invalid storage class specified.");
-
-  public static final OzoneException BAD_DIGEST =
-      new OzoneException(HTTP_BAD_REQUEST, "badDigest",
"Content MD5 does not match."); - - public static final OzoneException INCOMPLETE_BODY = - new OzoneException(HTTP_BAD_REQUEST, "incompleteBody", - "Content length does not match stream size."); - - public static final OzoneException BAD_AUTHORIZATION = - new OzoneException(HTTP_BAD_REQUEST, "badAuthorization", - "Missing authorization or authorization has to be " + - "unique."); - - public static final OzoneException BAD_PROPERTY = - new OzoneException(HTTP_BAD_REQUEST, "unknownProperty", - "This property is not supported by this server."); - - /* Error 401 */ - public static final OzoneException UNAUTHORIZED = - new OzoneException(HTTP_UNAUTHORIZED, "Unauthorized", - "Access token is missing or invalid token."); - - /* Error 403 */ - public static final OzoneException ACCESS_DENIED = - new OzoneException(HTTP_FORBIDDEN, "accessDenied", "Access denied."); - - /* Error 404 */ - public static final OzoneException USER_NOT_FOUND = - new OzoneException(HTTP_NOT_FOUND, "userNotFound", "Invalid user name."); - - public static final OzoneException VOLUME_NOT_FOUND = - new OzoneException(HTTP_NOT_FOUND, "volumeNotFound", "No such volume."); - - /* Error 409 */ - public static final OzoneException VOLUME_ALREADY_EXISTS = - new OzoneException(HTTP_CONFLICT, "volumeAlreadyExists", - "Duplicate volume name."); - - public static final OzoneException BUCKET_ALREADY_EXISTS = - new OzoneException(HTTP_CONFLICT, "bucketAlreadyExists", - "Duplicate bucket name."); - - public static final OzoneException VOLUME_NOT_EMPTY = - new OzoneException(HTTP_CONFLICT, "volumeNotEmpty", - "Volume must not have any buckets."); - - public static final OzoneException BUCKET_NOT_EMPTY = - new OzoneException(HTTP_CONFLICT, "bucketNotEmpty", - "Bucket must not have any keys."); - - public static final OzoneException KEY_OPERATION_CONFLICT = - new OzoneException(HTTP_CONFLICT, "keyOperationConflict", - "Conflicting operation on the specified key is going" + - " on."); - - /* Error 500 */ - public static final OzoneException SERVER_ERROR = - new OzoneException(HTTP_INTERNAL_ERROR, "internalServerError", - "Internal server error."); - - /** - * Create a new instance of Error. - * - * @param e Error Template - * @param requestID Request ID - * @param resource Resource Name - * @param hostID hostID - * - * @return creates a new instance of error based on the template - */ - public static OzoneException newError(OzoneException e, String requestID, - String resource, String hostID) { - OzoneException err = - new OzoneException(e.getHttpCode(), e.getShortMessage(), - e.getMessage()); - err.setRequestId(requestID); - err.setResource(resource); - err.setHostID(hostID); - return err; - } - - /** - * Create new instance of Error. - * - * @param e - Error Template - * @param args - Args - * - * @return Ozone Exception - */ - public static OzoneException newError(OzoneException e, UserArgs args) { - OzoneException err = - new OzoneException(e.getHttpCode(), e.getShortMessage(), - e.getMessage()); - if (args != null) { - err.setRequestId(args.getRequestID()); - err.setResource(args.getResourceName()); - err.setHostID(args.getHostName()); - } - return err; - } - - /** - * Create new instance of Error. 
- * - * @param e - Error Template - * @param args - Args - * @param ex Exception - * - * @return Ozone Exception - */ - public static OzoneException newError(OzoneException e, UserArgs args, - Exception ex) { - OzoneException err = - new OzoneException(e.getHttpCode(), e.getShortMessage(), ex); - - if(args != null) { - err.setRequestId(args.getRequestID()); - err.setResource(args.getResourceName()); - err.setHostID(args.getHostName()); - } - err.setMessage(ex.getMessage()); - return err; - } - - private ErrorTable() { - // Never constructed. - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/exceptions/package-info.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/exceptions/package-info.java deleted file mode 100644 index 59cf724c50e..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/exceptions/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.exceptions; - -/** - This package contains ozone client side libraries. - */ diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java deleted file mode 100644 index 3c73fce7781..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
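
The newError overloads above are how handler code materializes an error: the shared templates stay immutable, and each request gets a fresh copy stamped with its request ID, resource name and host. Before the handlers that consume it, a hedged sketch of the pattern (mirroring the handleCall code further below; the bucketName variable is illustrative):

try {
  // The REST layer validates resource names before touching storage.
  OzoneUtils.verifyResourceName(bucketName);
} catch (IllegalArgumentException e) {
  // Copy the shared template, then attach request-specific context.
  throw ErrorTable.newError(ErrorTable.INVALID_BUCKET_NAME, args, e);
}
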
- */
-
-package org.apache.hadoop.ozone.web.handlers;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.OzoneRestUtils;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
-import org.apache.hadoop.ozone.web.interfaces.Bucket;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.slf4j.MDC;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Request;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import java.io.IOException;
-
-import static java.net.HttpURLConnection.HTTP_CREATED;
-import static java.net.HttpURLConnection.HTTP_OK;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_FUNCTION;
-
-
-/**
- * Bucket Class handles all ozone Bucket related actions.
- */
-public class BucketHandler implements Bucket {
-  /**
-   * createBucket call handles the POST request for creating a bucket.
-   *
-   * @param volume - Volume name
-   * @param bucket - Bucket Name
-   * @param req - Http request
-   * @param info - Uri Info
-   * @param headers - Http headers
-   *
-   * @return Response
-   *
-   * @throws OzoneException
-   */
-  @Override
-  public Response createBucket(String volume, String bucket, Request req,
-                               UriInfo info, HttpHeaders headers)
-      throws OzoneException {
-    MDC.put(OZONE_FUNCTION, "createBucket");
-    return new BucketProcessTemplate() {
-      @Override
-      public Response doProcess(BucketArgs args)
-          throws OzoneException, IOException {
-        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-        args.setVersioning(getVersioning(args));
-        args.setStorageType(getStorageType(args));
-        fs.createBucket(args);
-        return OzoneRestUtils.getResponse(args, HTTP_CREATED, "");
-      }
-    }.handleCall(volume, bucket, req, info, headers);
-  }
-
-  /**
-   * updateBucket call handles the PUT request for updating a bucket.
-   *
-   * There are only three possible actions currently with updateBucket:
-   * add/remove of ACLs, bucket versioning, and storage type. If you make a
-   * call with any other action, update just returns 200 OK.
-   *
-   * @param volume - Storage volume name
-   * @param bucket - Bucket name
-   * @param req - Http request
-   * @param info - Uri Info
-   * @param headers - Http headers
-   *
-   * @return Response
-   *
-   * @throws OzoneException
-   */
-  @Override
-  public Response updateBucket(String volume, String bucket, Request req,
-                               UriInfo info, HttpHeaders headers)
-      throws OzoneException {
-    MDC.put(OZONE_FUNCTION, "updateBucket");
-    return new BucketProcessTemplate() {
-      @Override
-      public Response doProcess(BucketArgs args)
-          throws OzoneException, IOException {
-        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-        args.setVersioning(getVersioning(args));
-        args.setStorageType(getStorageType(args));
-        if (args.getVersioning() != OzoneConsts.Versioning.NOT_DEFINED) {
-          fs.setBucketVersioning(args);
-        }
-
-        if (args.getStorageType() != null) {
-          fs.setBucketStorageClass(args);
-        }
-        return OzoneRestUtils.getResponse(args, HTTP_OK, "");
-      }
-    }.handleCall(volume, bucket, req, info, headers);
-  }
-
-  /**
-   * Deletes an empty bucket.
-   *
-   * @param volume Volume name
-   * @param bucket Bucket Name
-   * @param req - Http request
-   * @param info - Uri Info
-   * @param headers - Http headers
-   *
-   * @return Response
-   *
-   * @throws OzoneException
-   */
-  @Override
-  public Response deleteBucket(String volume, String bucket, Request req,
-                               UriInfo info, HttpHeaders headers)
-      throws OzoneException {
-    MDC.put(OZONE_FUNCTION, "deleteBucket");
-    return new BucketProcessTemplate() {
-      @Override
-      public Response doProcess(BucketArgs args)
-          throws OzoneException, IOException {
-        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-        fs.deleteBucket(args);
-        return OzoneRestUtils.getResponse(args, HTTP_OK, "");
-      }
-    }.handleCall(volume, bucket, req, info, headers);
-  }
-
-  /**
-   * listBucket handles the list query; depending on the info query
-   * parameter it returns either the keys in the bucket or the bucket's
-   * metadata.
-   *
-   * @param volume - Storage Volume Name
-   * @param bucket - Bucket Name
-   * @param info - Type of information to list (keys or bucket info)
-   * @param prefix - Prefix for the keys to be fetched
-   * @param maxKeys - Max number of keys to return
-   * @param startPage - Continuation token
-   * @param req - Http request
-   * @param headers - Http headers
-   *
-   * @return - Json Body
-   *
-   * @throws OzoneException
-   */
-  @Override
-  public Response listBucket(String volume, String bucket, final String info,
-                             final String prefix, final int maxKeys,
-                             final String startPage, Request req,
-                             UriInfo uriInfo, HttpHeaders headers)
-      throws OzoneException {
-    MDC.put(OZONE_FUNCTION, "listBucket");
-    return new BucketProcessTemplate() {
-      @Override
-      public Response doProcess(BucketArgs args)
-          throws OzoneException, IOException {
-        switch (info) {
-        case Header.OZONE_INFO_QUERY_KEY:
-          ListArgs listArgs = new ListArgs(args, prefix, maxKeys, startPage);
-          return getBucketKeysList(listArgs);
-        case Header.OZONE_INFO_QUERY_BUCKET:
-          return getBucketInfoResponse(args);
-        default:
-          OzoneException ozException =
-              ErrorTable.newError(ErrorTable.INVALID_QUERY_PARAM, args);
-          ozException.setMessage("Unrecognized query param : " + info);
-          throw ozException;
-        }
-      }
-    }.handleCall(volume, bucket, req, uriInfo, headers);
-  }
-}
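
Every operation in BucketHandler above has the same shape: wrap the operation-specific logic in an anonymous BucketProcessTemplate (the class defined in the next file) and let handleCall supply validation, MDC bookkeeping and error translation. A hedged sketch of how a new operation would slot into the pattern; headBucket is hypothetical, the other names come from the deleted code:

public Response headBucket(String volume, String bucket, Request req,
    UriInfo info, HttpHeaders headers) throws OzoneException {
  return new BucketProcessTemplate() {
    @Override
    public Response doProcess(BucketArgs args)
        throws OzoneException, IOException {
      StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
      fs.getBucketInfo(args);   // existence check only; body stays empty
      return OzoneRestUtils.getResponse(args, HTTP_OK, "");
    }
  }.handleCall(volume, bucket, req, info, headers);
}
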
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
deleted file mode 100644
index 7dea7167497..00000000000
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.handlers;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Request;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import java.io.IOException;
-import java.nio.file.DirectoryNotEmptyException;
-import java.nio.file.FileAlreadyExistsException;
-import java.nio.file.NoSuchFileException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.OzoneRestUtils;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.interfaces.UserAuth;
-import org.apache.hadoop.ozone.web.response.BucketInfo;
-import org.apache.hadoop.ozone.web.response.ListKeys;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-
-import static java.net.HttpURLConnection.HTTP_OK;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_COMPONENT;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_REQUEST;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RESOURCE;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_USER;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
-
-/**
- * This class abstracts away the repetitive tasks in
- * bucket handling code.
- */
-public abstract class BucketProcessTemplate {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(BucketProcessTemplate.class);
-
-  /**
-   * This function serves as the common error handling function
-   * for all bucket related operations.
-   *
-   * @param volume - Volume Name
-   * @param bucket - Bucket Name
-   * @param request - Http Request
-   * @param uriInfo - Http Uri
-   * @param headers - Http Headers
-   *
-   * @return Response
-   *
-   * @throws OzoneException
-   */
-  public Response handleCall(String volume, String bucket, Request request,
-                             UriInfo uriInfo, HttpHeaders headers)
-      throws OzoneException {
-    // TODO : Add logging
-    String reqID = OzoneUtils.getRequestID();
-    String hostName = OzoneUtils.getHostName();
-    MDC.put(OZONE_COMPONENT, "ozone");
-    MDC.put(OZONE_REQUEST, reqID);
-    UserArgs userArgs = null;
-    try {
-      userArgs = new UserArgs(reqID, hostName, request, uriInfo, headers);
-
-      OzoneRestUtils.validate(request, headers, reqID, bucket, hostName);
-      OzoneUtils.verifyResourceName(bucket);
-
-      UserAuth auth = UserHandlerBuilder.getAuthHandler();
-      userArgs.setUserName(auth.getUser(userArgs));
-      MDC.put(OZONE_USER, userArgs.getUserName());
-
-      BucketArgs args = new BucketArgs(volume, bucket, userArgs);
-      MDC.put(OZONE_RESOURCE, args.getResourceName());
-      Response response = doProcess(args);
-      LOG.debug("Success");
-      MDC.clear();
-      return response;
-
-    } catch (IllegalArgumentException argEx) {
-      LOG.error("Invalid bucket.", argEx);
-      throw ErrorTable.newError(ErrorTable.INVALID_BUCKET_NAME, userArgs,
-          argEx);
-    } catch (IOException fsExp) {
-      handleIOException(bucket, reqID, hostName, fsExp);
-    }
-    return null;
-  }
-
-  /**
-   * Converts FileSystem IO exceptions to OZONE exceptions.
- * - * @param bucket Name of the bucket - * @param reqID Request ID - * @param hostName Machine Name - * @param fsExp Exception - * - * @throws OzoneException - */ - void handleIOException(String bucket, String reqID, String hostName, - IOException fsExp) throws OzoneException { - LOG.error("IOException:", fsExp); - - OzoneException exp = null; - if (fsExp instanceof FileAlreadyExistsException) { - exp = ErrorTable - .newError(ErrorTable.BUCKET_ALREADY_EXISTS, reqID, bucket, hostName); - } - - if (fsExp instanceof DirectoryNotEmptyException) { - exp = ErrorTable - .newError(ErrorTable.BUCKET_NOT_EMPTY, reqID, bucket, hostName); - } - - if (fsExp instanceof NoSuchFileException) { - exp = ErrorTable - .newError(ErrorTable.INVALID_BUCKET_NAME, reqID, bucket, hostName); - } - - // Default we don't handle this exception yet, - // report a Server Internal Error. - if (exp == null) { - exp = - ErrorTable.newError(ErrorTable.SERVER_ERROR, reqID, bucket, hostName); - if (fsExp != null) { - exp.setMessage(fsExp.getMessage()); - } - } - throw exp; - } - - /** - * Abstract function that gets implemented in the BucketHandler functions. - * This function will just deal with the core file system related logic - * and will rely on handleCall function for repetitive error checks - * - * @param args - parsed bucket args, name, userName, ACLs etc - * - * @return Response - * - * @throws OzoneException - * @throws IOException - */ - public abstract Response doProcess(BucketArgs args) - throws OzoneException, IOException; - - - /** - * Returns the ACL String if available. - * This function ignores all ACLs that are not prefixed with either - * ADD or Remove - * - * @param args - BucketArgs - * @param tag - Tag for different type of acls - * - * @return List of ACLs - * - */ - List getAcls(BucketArgs args, String tag) { - List aclStrings = - args.getHeaders().getRequestHeader(Header.OZONE_ACLS); - List filteredSet = null; - if (aclStrings != null) { - filteredSet = new ArrayList<>(); - for (String s : aclStrings) { - if (s.startsWith(tag)) { - filteredSet.add(s.replaceFirst(tag, "")); - } - } - } - return filteredSet; - } - - /** - * Returns bucket versioning Info. - * - * @param args - BucketArgs - * - * @return - String - * - * @throws OzoneException - */ - OzoneConsts.Versioning getVersioning(BucketArgs args) throws OzoneException { - - List versionStrings = - args.getHeaders().getRequestHeader(Header.OZONE_BUCKET_VERSIONING); - if (versionStrings == null) { - return null; - } - - if (versionStrings.size() > 1) { - OzoneException ex = - ErrorTable.newError(ErrorTable.MALFORMED_BUCKET_VERSION, args); - ex.setMessage("Exactly one bucket version header required"); - throw ex; - } - - String version = versionStrings.get(0); - try { - return OzoneConsts.Versioning.valueOf(version); - } catch (IllegalArgumentException ex) { - LOG.debug("Malformed Version. version: {}", version); - throw ErrorTable.newError(ErrorTable.MALFORMED_BUCKET_VERSION, args, ex); - } - } - - - /** - * Returns Storage Class if Available or returns Default. 
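The handleIOException helper above translates java.nio.file exceptions into REST error-table entries. A hedged, JDK-only sketch of that translation; the numeric HTTP status codes are my illustration, not values taken from ErrorTable:

```java
import java.io.IOException;
import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.NoSuchFileException;

// Sketch of the exception-to-status mapping performed by the removed
// handleIOException(); ErrorMapper and the codes are illustrative only.
final class ErrorMapper {
  private ErrorMapper() { }

  static int statusFor(IOException e) {
    if (e instanceof FileAlreadyExistsException) {
      return 409;                  // bucket already exists -> Conflict
    }
    if (e instanceof DirectoryNotEmptyException) {
      return 409;                  // bucket not empty -> Conflict
    }
    if (e instanceof NoSuchFileException) {
      return 400;                  // invalid bucket name -> Bad Request
    }
    return 500;                    // anything unmapped -> Server Error
  }
}
```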
- * - * @param args - bucketArgs - * - * @return StorageType - * - * @throws OzoneException - */ - StorageType getStorageType(BucketArgs args) throws OzoneException { - List storageClassString = null; - try { - storageClassString = - args.getHeaders().getRequestHeader(Header.OZONE_STORAGE_TYPE); - - if (storageClassString == null) { - return null; - } - if (storageClassString.size() > 1) { - OzoneException ex = - ErrorTable.newError(ErrorTable.MALFORMED_STORAGE_TYPE, args); - ex.setMessage("Exactly one storage class header required"); - throw ex; - } - return StorageType.valueOf(storageClassString.get(0).toUpperCase()); - } catch (IllegalArgumentException ex) { - if(storageClassString != null) { - LOG.debug("Malformed storage type. Type: {}", - storageClassString.get(0).toUpperCase()); - } - throw ErrorTable.newError(ErrorTable.MALFORMED_STORAGE_TYPE, args, ex); - } - } - - /** - * Returns BucketInfo response. - * - * @param args - BucketArgs - * - * @return BucketInfo - * - * @throws IOException - * @throws OzoneException - */ - Response getBucketInfoResponse(BucketArgs args) - throws IOException, OzoneException { - StorageHandler fs = StorageHandlerBuilder.getStorageHandler(); - BucketInfo info = fs.getBucketInfo(args); - return OzoneRestUtils.getResponse(args, HTTP_OK, info.toJsonString()); - } - - /** - * Returns list of objects in a bucket. - * @param args - ListArgs - * @return Response - * @throws IOException - * @throws OzoneException - */ - Response getBucketKeysList(ListArgs args) throws IOException, OzoneException { - StorageHandler fs = StorageHandlerBuilder.getStorageHandler(); - ListKeys objects = fs.listKeys(args); - return OzoneRestUtils.getResponse(args.getArgs(), HTTP_OK, - objects.toJsonString()); - } - -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java deleted file mode 100644 index 97e2a1a3841..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java +++ /dev/null @@ -1,302 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.web.handlers; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Request; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriInfo; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; - -import org.apache.hadoop.ozone.OzoneRestUtils; -import org.apache.hadoop.ozone.client.io.LengthInputStream; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.client.rest.headers.Header; -import org.apache.hadoop.ozone.web.exceptions.ErrorTable; -import org.apache.hadoop.ozone.web.interfaces.Keys; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.response.KeyInfo; - -import static java.net.HttpURLConnection.HTTP_CREATED; -import static java.net.HttpURLConnection.HTTP_OK; -import org.apache.commons.codec.binary.Hex; - -/** - * KeyHandler deals with basic Key Operations. - */ -public class KeyHandler implements Keys { - - /** - * Gets the Key/key information if it exists. - * - * @param volume Storage Volume - * @param bucket Name of the bucket - * @param key Name of the key - * @param info Tag info - * @param req Request - * @param uriInfo Uri Info - * @param headers Http Header - * @return Response - * @throws OzoneException - */ - @Override - public Response getKey(String volume, String bucket, String key, String info, - Request req, UriInfo uriInfo, HttpHeaders headers) - throws OzoneException { - return new KeyProcessTemplate() { - /** - * Abstract function that gets implemented in the KeyHandler functions. - * This function will just deal with the core file system related logic - * and will rely on handleCall function for repetitive error checks - * - * @param args - parsed bucket args, name, userName, ACLs etc - * @param input - The body as an Input Stream - * @param request - Http request - * @param headers - Parsed http Headers. - * @param uriInfo - UriInfo - * - * @return Response - * - * @throws IOException - From the file system operations - */ - @Override - public Response doProcess(KeyArgs args, InputStream input, - Request request, HttpHeaders headers, - UriInfo uriInfo) - throws IOException, OzoneException, NoSuchAlgorithmException { - if (info == null) { - return getKey(args); - } else if (info.equals(Header.OZONE_INFO_QUERY_KEY)) { - return getKeyInfo(args); - } else if (info.equals(Header.OZONE_INFO_QUERY_KEY_DETAIL)) { - return getKeyInfoDetail(args); - } - - OzoneException ozException = ErrorTable - .newError(ErrorTable.INVALID_QUERY_PARAM, args); - ozException.setMessage("Unrecognized query param : " + info); - throw ozException; - } - }.handleCall(volume, bucket, key, req, headers, uriInfo, null); - } - - /** - * Gets the Key if it exists. - */ - private Response getKey(KeyArgs args) - throws IOException, OzoneException { - StorageHandler fs = StorageHandlerBuilder.getStorageHandler(); - LengthInputStream stream = fs.newKeyReader(args); - return OzoneRestUtils.getResponse(args, HTTP_OK, stream); - } - - /** - * Gets the Key information if it exists. - */ - private Response getKeyInfo(KeyArgs args) - throws IOException, OzoneException { - StorageHandler fs = StorageHandlerBuilder.getStorageHandler(); - KeyInfo keyInfo = fs.getKeyInfo(args); - return OzoneRestUtils.getResponse(args, HTTP_OK, keyInfo.toJsonString()); - } - - /** - * Gets the Key detail information if it exists. 
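getKey above dispatches on an optional "info" query parameter: absent means stream the key data, otherwise return metadata or detailed metadata. A small stand-alone sketch of that dispatch; the constant values are stand-ins for the real Header fields:

```java
// Sketch of the removed getKey() dispatch on the optional "info" query
// parameter. The constants and return strings are illustrative stand-ins.
final class KeyInfoDispatch {
  static final String QUERY_KEY = "key";
  static final String QUERY_KEY_DETAIL = "key-detail";

  private KeyInfoDispatch() { }

  static String dispatch(String info) {
    if (info == null) {
      return "stream raw key data";          // plain GET, no query param
    }
    switch (info) {
      case QUERY_KEY:        return "key metadata as JSON";
      case QUERY_KEY_DETAIL: return "key metadata plus block locations";
      default:
        throw new IllegalArgumentException(
            "Unrecognized query param : " + info);
    }
  }
}
```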
- */ - private Response getKeyInfoDetail(KeyArgs args) - throws IOException, OzoneException { - StorageHandler fs = StorageHandlerBuilder.getStorageHandler(); - KeyInfo keyInfo = fs.getKeyInfoDetails(args); - return OzoneRestUtils.getResponse(args, HTTP_OK, keyInfo.toJsonString()); - } - - /** - * Adds a key to an existing bucket. If the object already exists this call - * will overwrite or add with new version number if the bucket versioning is - * turned on. - * - * @param volume Storage Volume Name - * @param bucket Name of the bucket - * @param keys Name of the Object - * @param is InputStream or File Data - * @param req Request - * @param info - UriInfo - * @param headers http headers - * @return Response - * @throws OzoneException - */ - @Override - public Response putKey(String volume, String bucket, String keys, - InputStream is, Request req, UriInfo info, - HttpHeaders headers) throws OzoneException { - - return new KeyProcessTemplate() { - /** - * Abstract function that gets implemented in the KeyHandler functions. - * This function will just deal with the core file system related logic - * and will rely on handleCall function for repetitive error checks - * - * @param args - parsed bucket args, name, userName, ACLs etc - * @param input - The body as an Input Stream - * @param request - Http request - * @param headers - Parsed http Headers. - * @param info - UriInfo - * - * @return Response - * - * @throws IOException - From the file system operations - */ - @Override - public Response doProcess(KeyArgs args, InputStream input, - Request request, HttpHeaders headers, - UriInfo info) - throws IOException, OzoneException, NoSuchAlgorithmException { - final int eof = -1; - StorageHandler fs = StorageHandlerBuilder.getStorageHandler(); - - byte[] buffer = new byte[4 * 1024]; - String contentLenString = getContentLength(headers, args); - String newLen = contentLenString.replaceAll("\"", ""); - int contentLen = Integer.parseInt(newLen); - args.setSize(contentLen); - - MessageDigest md5 = MessageDigest.getInstance("MD5"); - int bytesRead = 0; - int len = 0; - OutputStream stream = fs.newKeyWriter(args); - while ((bytesRead < contentLen) && (len != eof)) { - int readSize = - (contentLen - bytesRead > buffer.length) ? buffer.length : - contentLen - bytesRead; - len = input.read(buffer, 0, readSize); - if (len != eof) { - stream.write(buffer, 0, len); - md5.update(buffer, 0, len); - bytesRead += len; - - } - } - - checkFileLengthMatch(args, fs, contentLen, bytesRead); - - String hashString = Hex.encodeHexString(md5.digest()); -// TODO : Enable hash value checking. -// String contentHash = getContentMD5(headers, args); -// checkFileHashMatch(args, hashString, fs, contentHash); - args.setHash(hashString); - args.setSize(bytesRead); - fs.commitKey(args, stream); - return OzoneRestUtils.getResponse(args, HTTP_CREATED, ""); - } - }.handleCall(volume, bucket, keys, req, headers, info, is); - } - - /** - * Deletes an existing key. - * - * @param volume Storage Volume Name - * @param bucket Name of the bucket - * @param keys Name of the Object - * @param req http Request - * @param info - UriInfo - * @param headers HttpHeaders - * @return Response - * @throws OzoneException - */ - @Override - public Response deleteKey(String volume, String bucket, String keys, - Request req, UriInfo info, HttpHeaders headers) - throws OzoneException { - return new KeyProcessTemplate() { - /** - * Abstract function that gets implemented in the KeyHandler functions. 
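The putKey body below this Javadoc reads exactly Content-Length bytes while feeding an MD5 digest, then rejects short bodies. A self-contained sketch of that bounded copy loop, using only the JDK (no commons-codec); names are hypothetical:

```java
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

// Standalone sketch of the removed putKey() copy loop: read exactly
// contentLen bytes, write them through, and digest them on the fly.
final class BoundedCopy {
  private BoundedCopy() { }

  static byte[] copyWithDigest(InputStream in, OutputStream out,
      int contentLen) throws IOException, NoSuchAlgorithmException {
    MessageDigest md5 = MessageDigest.getInstance("MD5");
    byte[] buffer = new byte[4 * 1024];
    int bytesRead = 0;
    while (bytesRead < contentLen) {
      int want = Math.min(buffer.length, contentLen - bytesRead);
      int len = in.read(buffer, 0, want);
      if (len == -1) {
        break;                     // short body; checked just below
      }
      out.write(buffer, 0, len);
      md5.update(buffer, 0, len);
      bytesRead += len;
    }
    if (bytesRead != contentLen) {
      throw new IOException("Body length mismatch: expected "
          + contentLen + ", got " + bytesRead);
    }
    return md5.digest();
  }
}
```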
- * This function will just deal with the core file system related logic - * and will rely on handleCall function for repetitive error checks - * - * @param args - parsed bucket args, name, userName, ACLs etc - * @param input - The body as an Input Stream - * @param request - Http request - * @param headers - Parsed http Headers. - * @param info - UriInfo - * - * @return Response - * - * @throws IOException - From the file system operations - */ - @Override - public Response doProcess(KeyArgs args, InputStream input, - Request request, HttpHeaders headers, - UriInfo info) - throws IOException, OzoneException, NoSuchAlgorithmException { - StorageHandler fs = StorageHandlerBuilder.getStorageHandler(); - fs.deleteKey(args); - return OzoneRestUtils.getResponse(args, HTTP_OK, ""); - } - }.handleCall(volume, bucket, keys, req, headers, info, null); - } - - /** - * Renames an existing key within a bucket. - * - * @param volume Storage Volume Name - * @param bucket Name of the bucket - * @param key Name of the Object - * @param toKeyName New name of the Object - * @param req http Request - * @param info UriInfo - * @param headers HttpHeaders - * @return Response - * @throws OzoneException - */ - @Override - public Response renameKey(String volume, String bucket, String key, - String toKeyName, Request req, UriInfo info, HttpHeaders headers) - throws OzoneException { - return new KeyProcessTemplate() { - /** - * Abstract function that gets implemented in the KeyHandler functions. - * This function will just deal with the core file system related logic - * and will rely on handleCall function for repetitive error checks - * - * @param args - parsed bucket args, name, userName, ACLs etc - * @param input - The body as an Input Stream - * @param request - Http request - * @param headers - Parsed http Headers. - * @param info - UriInfo - * - * @return Response - * - * @throws IOException - From the file system operations - */ - @Override - public Response doProcess(KeyArgs args, InputStream input, - Request request, HttpHeaders headers, - UriInfo info) - throws IOException, OzoneException, NoSuchAlgorithmException { - StorageHandler fs = StorageHandlerBuilder.getStorageHandler(); - fs.renameKey(args, toKeyName); - return OzoneRestUtils.getResponse(args, HTTP_OK, ""); - } - }.handleCall(volume, bucket, key, req, headers, info, null); - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java deleted file mode 100644 index ad487873187..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.handlers; - -import org.apache.commons.codec.binary.Base64; - -import org.apache.hadoop.ozone.OzoneRestUtils; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.web.exceptions.ErrorTable; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.client.rest.headers.Header; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.interfaces.UserAuth; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.slf4j.MDC; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Request; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriInfo; -import java.io.IOException; -import java.io.InputStream; -import java.security.NoSuchAlgorithmException; -import java.util.List; - -import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.BAD_DIGEST; -import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.INCOMPLETE_BODY; -import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.INVALID_BUCKET_NAME; -import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.INVALID_REQUEST; -import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.SERVER_ERROR; -import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.newError; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_COMPONENT; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RESOURCE; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_REQUEST; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_USER; - - -/** - * This class abstracts way the repetitive tasks in Key handling code. - */ -public abstract class KeyProcessTemplate { - private static final Logger LOG = - LoggerFactory.getLogger(KeyProcessTemplate.class); - - /** - * This function serves as the common error handling function for all Key - * related operations. 
- * - * @param bucket bucket Name - * @param key the object name - * @param headers Http headers - * @param is Input XML stream - * @throws OzoneException - */ - public Response handleCall(String volume, String bucket, String key, - Request request, HttpHeaders headers, UriInfo info, - InputStream is) throws OzoneException { - - String reqID = OzoneUtils.getRequestID(); - String hostName = OzoneUtils.getHostName(); - MDC.put(OZONE_COMPONENT, "ozone"); - MDC.put(OZONE_REQUEST, reqID); - UserArgs userArgs = null; - try { - userArgs = new UserArgs(reqID, hostName, request, info, headers); - OzoneRestUtils.validate(request, headers, reqID, bucket, hostName); - OzoneUtils.verifyResourceName(bucket); - - UserAuth auth = UserHandlerBuilder.getAuthHandler(); - userArgs.setUserName(auth.getUser(userArgs)); - MDC.put(OZONE_USER, userArgs.getUserName()); - - KeyArgs args = new KeyArgs(volume, bucket, key, userArgs); - MDC.put(OZONE_RESOURCE, args.getResourceName()); - Response response = doProcess(args, is, request, headers, info); - LOG.debug("Success"); - MDC.clear(); - return response; - - } catch (IllegalArgumentException argExp) { - LOG.error("Invalid bucket in key call.", argExp); - throw newError(INVALID_BUCKET_NAME, userArgs, argExp); - } catch (IOException fsExp) { - LOG.error("IOException:", fsExp); - // Map KEY_NOT_FOUND to INVALID_KEY - if (fsExp.getMessage().endsWith( - OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND.name())) { - throw ErrorTable.newError(ErrorTable.INVALID_KEY, userArgs, fsExp); - } - - // TODO : Handle errors from the FileSystem , let us map to server error - // for now. - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, userArgs, fsExp); - } catch (NoSuchAlgorithmException algoEx) { - LOG.error("NoSuchAlgorithmException. Probably indicates an unusual java " - + "installation.", algoEx); - throw ErrorTable.newError(SERVER_ERROR, userArgs, algoEx); - } - } - - /** - * Abstract function that gets implemented in the KeyHandler functions. This - * function will just deal with the core file system related logic and will - * rely on handleCall function for repetitive error checks - * - * @param args - parsed bucket args, name, userName, ACLs etc - * @param input - The body as an Input Stream - * @param request - Http request - * @param headers - Parsed http Headers. - * @param info - UriInfo - * @return Response - * @throws IOException - From the file system operations - */ - public abstract Response doProcess(KeyArgs args, InputStream input, - Request request, HttpHeaders headers, - UriInfo info) - throws IOException, OzoneException, NoSuchAlgorithmException; - - /** - * checks if the File Content-MD5 we wrote matches the hash we computed from - * the stream. if it does match we delete the file and throw and exception to - * let the user know that we have a hash mismatch - * - * @param args Object Args - * @param computedString MD5 hash value - * @param fs Pointer to File System so we can delete the file - * @param contentHash User Specified hash string - * @throws IOException - * @throws OzoneException - */ - public void checkFileHashMatch(KeyArgs args, String computedString, - StorageHandler fs, String contentHash) - throws IOException, OzoneException { - if (contentHash != null) { - String contentString = - new String(Base64.decodeBase64(contentHash), OzoneUtils.ENCODING) - .trim(); - - if (!contentString.equals(computedString)) { - fs.deleteKey(args); - OzoneException ex = ErrorTable.newError(BAD_DIGEST, args); - String msg = String.format("MD5 Digest mismatch. 
Expected %s Found " + - "%s", contentString, computedString); - ex.setMessage(msg); - LOG.debug(msg); - throw ex; - } - } - } - - /** - * check if the content-length matches the actual stream length. if we find a - * mismatch we will delete the file and throw an exception to let the user - * know that length mismatch detected - * - * @param args Object Args - * @param fs Pointer to File System Object, to delete the file that we - * wrote - * @param contentLen Http Content-Length Header - * @param bytesRead Actual Bytes we read from the stream - * @throws IOException - * @throws OzoneException - */ - public void checkFileLengthMatch(KeyArgs args, StorageHandler fs, - int contentLen, int bytesRead) - throws IOException, OzoneException { - if (bytesRead != contentLen) { - fs.deleteKey(args); - OzoneException ex = ErrorTable.newError(INCOMPLETE_BODY, args); - String msg = String.format("Body length mismatch. Expected length : %d" + - " Found %d", contentLen, bytesRead); - ex.setMessage(msg); - LOG.debug(msg); - throw ex; - } - } - - /** - * Returns Content Length header value if available. - * - * @param headers - Http Headers - * @return - String or null - */ - public String getContentLength(HttpHeaders headers, KeyArgs args) - throws OzoneException { - List contentLengthList = - headers.getRequestHeader(HttpHeaders.CONTENT_LENGTH); - if ((contentLengthList != null) && (contentLengthList.size() > 0)) { - return contentLengthList.get(0); - } - - OzoneException ex = ErrorTable.newError(INVALID_REQUEST, args); - ex.setMessage("Content-Length is a required header for putting a key."); - throw ex; - - } - - /** - * Returns Content MD5 value if available. - * - * @param headers - Http Headers - * @return - String or null - */ - public String getContentMD5(HttpHeaders headers, KeyArgs args) { - List contentLengthList = - headers.getRequestHeader(Header.CONTENT_MD5); - if ((contentLengthList != null) && (contentLengthList.size() > 0)) { - return contentLengthList.get(0); - } -// TODO : Should we make this compulsory ? -// OzoneException ex = ErrorTable.newError(ErrorTable.invalidRequest, args); -// ex.setMessage("Content-MD5 is a required header for putting a key"); -// throw ex; - return ""; - } -} - diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/ServiceFilter.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/ServiceFilter.java deleted file mode 100644 index 76aa2868ea7..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/ServiceFilter.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
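checkFileHashMatch above compares the Base64-encoded Content-MD5 header against the hex digest computed server-side, deleting the key on mismatch. A sketch that mirrors the comparison (not the delete); Md5Check is a hypothetical name and the exception type is illustrative:

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

// Sketch of the removed checkFileHashMatch(): the Content-MD5 header is
// Base64, so it is decoded to a string before comparing against the
// server-computed hex digest, mirroring the removed logic.
final class Md5Check {
  private Md5Check() { }

  static void verify(String contentMd5Header, String computedHex) {
    if (contentMd5Header == null) {
      return;                                    // header is optional here
    }
    String expected = new String(
        Base64.getDecoder().decode(contentMd5Header),
        StandardCharsets.UTF_8).trim();
    if (!expected.equals(computedHex)) {
      throw new IllegalStateException(String.format(
          "MD5 Digest mismatch. Expected %s Found %s",
          expected, computedHex));
    }
  }
}
```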
- */ - -package org.apache.hadoop.ozone.web.handlers; - -import com.sun.jersey.spi.container.ContainerRequest; -import com.sun.jersey.spi.container.ContainerRequestFilter; -import org.apache.hadoop.ozone.client.rest.headers.Header; - -import javax.ws.rs.core.UriBuilder; -import javax.ws.rs.ext.Provider; - -/** - * This class is used to intercept root URL requests and route it to - * Volume List functionality. - */ -@Provider -public class ServiceFilter implements ContainerRequestFilter { - /** - * Filter the request. - *
- * An implementation may modify the state of the request or - * create a new instance. - * - * @param request the request. - * - * @return the request. - */ - @Override - public ContainerRequest filter(ContainerRequest request) { - if (request.getRequestUri().getPath().length() > 1) { - return request; - } - - // Just re-route it to volume handler with some hypothetical volume name. - // volume name is ignored. - - request.setUris(request.getBaseUri(), - UriBuilder.fromUri(request.getRequestUri()) - .path("/service") - .queryParam("info", Header.OZONE_LIST_QUERY_SERVICE) - .build()); - - return request; - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/StorageHandlerBuilder.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/StorageHandlerBuilder.java deleted file mode 100644 index b3c33914338..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/StorageHandlerBuilder.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.handlers; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * This class is responsible for providing a {@link StorageHandler} - * implementation to object store web handlers. - */ -@InterfaceAudience.Private -public final class StorageHandlerBuilder { - - - private static final Logger LOG = - LoggerFactory.getLogger(StorageHandlerBuilder.class); - private static final ThreadLocal - STORAGE_HANDLER_THREAD_LOCAL = new ThreadLocal<>(); - - /** - * Returns the configured StorageHandler from thread-local storage for this - * thread. - * - * @return StorageHandler from thread-local storage - */ - public static StorageHandler getStorageHandler() throws IOException { - StorageHandler storageHandler = STORAGE_HANDLER_THREAD_LOCAL.get(); - if (storageHandler != null) { - return storageHandler; - } else { - LOG.error("No Storage Handler Configured."); - throw new IOException("Invalid Handler Configuration"); - } - - } - - /** - * Removes the configured StorageHandler from thread-local storage for this - * thread. - */ - public static void removeStorageHandler() { - STORAGE_HANDLER_THREAD_LOCAL.remove(); - } - - /** - * Sets the configured StorageHandler in thread-local storage for this thread. 
- * - * @param storageHandler StorageHandler to set in thread-local storage - */ - public static void setStorageHandler(StorageHandler storageHandler) { - STORAGE_HANDLER_THREAD_LOCAL.set(storageHandler); - } - - /** - * There is no reason to instantiate this class. - */ - private StorageHandlerBuilder() { - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/UserHandlerBuilder.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/UserHandlerBuilder.java deleted file mode 100644 index d9051f3449e..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/UserHandlerBuilder.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.handlers; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.web.interfaces.UserAuth; -import org.apache.hadoop.ozone.web.userauth.Simple; - -/** - * This class is responsible for providing a - * {@link org.apache.hadoop.ozone.web.interfaces.UserAuth} - * implementation to object store web handlers. - */ -@InterfaceAudience.Private -public final class UserHandlerBuilder { - - private static final ThreadLocal USER_AUTH_THREAD_LOCAL = - new ThreadLocal(); - - /** - * Returns the configured UserAuth from thread-local storage for this - * thread. - * - * @return UserAuth from thread-local storage - */ - public static UserAuth getAuthHandler() { - UserAuth authHandler = USER_AUTH_THREAD_LOCAL.get(); - if (authHandler != null) { - return authHandler; - } else { - // This only happens while using mvn jetty:run for testing. - return new Simple(); - } - } - - /** - * Removes the configured UserAuth from thread-local storage for this - * thread. - */ - public static void removeAuthHandler() { - USER_AUTH_THREAD_LOCAL.remove(); - } - - /** - * Sets the configured UserAuthHandler in thread-local storage for this - * thread. - * - * @param authHandler authHandler to set in thread-local storage - */ - public static void setAuthHandler(UserAuth authHandler) { - USER_AUTH_THREAD_LOCAL.set(authHandler); - } - - /** - * There is no reason to instantiate this class. 
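StorageHandlerBuilder and UserHandlerBuilder are both per-thread holders: a handler is bound to the current request thread and must be removed afterwards. The generic shape, assuming a servlet-style thread-per-request model (Holder is a hypothetical name):

```java
// Generic form of the per-request ThreadLocal holder used by the removed
// StorageHandlerBuilder/UserHandlerBuilder pair; Holder is hypothetical.
final class Holder<T> {
  private final ThreadLocal<T> slot = new ThreadLocal<>();

  T get() {
    T value = slot.get();
    if (value == null) {
      throw new IllegalStateException("nothing bound on this thread");
    }
    return value;
  }

  void set(T value) {
    slot.set(value);
  }

  // Must be called when the request completes; pooled server threads would
  // otherwise observe a stale handler on the next request they serve.
  void remove() {
    slot.remove();
  }
}
```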
- */ - private UserHandlerBuilder() { - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeHandler.java deleted file mode 100644 index bd04ea6ea07..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeHandler.java +++ /dev/null @@ -1,274 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.handlers; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Request; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriInfo; -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.OzoneRestUtils; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.client.rest.headers.Header; -import org.apache.hadoop.ozone.web.exceptions.ErrorTable; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.interfaces.UserAuth; -import org.apache.hadoop.ozone.web.interfaces.Volume; - -import static java.net.HttpURLConnection.HTTP_CREATED; -import static java.net.HttpURLConnection.HTTP_OK; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_FUNCTION; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.slf4j.MDC; - -/** - * VolumeHandler handles volume specific HTTP calls. - * - * Most functions in this file follow a simple pattern. - * All calls are handled by VolumeProcessTemplate.handleCall, which - * calls back into doProcess function. - * - * Everything common to volume handling is abstracted out in handleCall function - * For Example : Checking that volume name is sane, we have a supported - * ozone version number and a valid date. That is everything common is in - * handleCall and actions specific to a call is inside doProcess callback. - */ -@InterfaceAudience.Private -public class VolumeHandler implements Volume { - private static final Logger LOG = LoggerFactory.getLogger(VolumeHandler - .class); - /** - * Creates a volume. 
- * - * @param volume Volume Name, this has to be unique at Ozone cluster level - * @param quota Quota for this Storage Volume - * - {@literal ()} - * @param req Request Object - * @param uriInfo URI info - * @param headers Http Headers - * - * @return Standard JAX-RS Response - * - * @throws OzoneException - */ - @Override - public Response createVolume(String volume, final String quota, Request req, - UriInfo uriInfo, HttpHeaders headers) - throws OzoneException { - MDC.put(OZONE_FUNCTION, "createVolume"); - return new VolumeProcessTemplate() { - @Override - public Response doProcess(VolumeArgs args) - throws IOException, OzoneException { - UserAuth auth = UserHandlerBuilder.getAuthHandler(); - if (auth.isAdmin(args)) { - args.setAdminName(args.getUserName()); - String volumeOwner = auth.getOzoneUser(args); - - if (volumeOwner == null) { - throw ErrorTable.newError(ErrorTable.USER_NOT_FOUND, args); - } - - if (!auth.isUser(volumeOwner, args)) { - throw ErrorTable.newError(ErrorTable.USER_NOT_FOUND, args); - } - - args.setUserName(volumeOwner); - args.setGroups(auth.getGroups(args)); - if (!quota.equals(Header.OZONE_QUOTA_UNDEFINED)) { - setQuotaArgs(args, quota); - } - StorageHandler fs = StorageHandlerBuilder.getStorageHandler(); - fs.createVolume(args); - return OzoneRestUtils.getResponse(args, HTTP_CREATED, ""); - } else { - throw ErrorTable.newError(ErrorTable.ACCESS_DENIED, args); - } - } - }.handleCall(volume, req, uriInfo, headers); - } - - /** - * Updates volume metadata. - * - * There are only two actions possible currently with updateVolume. - * Change the volume ownership or update quota. if you make a call - * with neither of these actions, update just returns 200 OK. - * - * @param volume Volume Name, this has to be unique at Ozone Level - * @param quota Quota for this volume - * - {@literal ()}|remove - * @param req - Request Object - * @param uriInfo - URI info - * @param headers Http Headers - * - * @return Standard JAX-RS Response - * - * @throws OzoneException - */ - @Override - public Response updateVolume(String volume, final String quota, Request req, - UriInfo uriInfo, HttpHeaders headers) - throws OzoneException { - MDC.put(OZONE_FUNCTION, "updateVolume"); - return new VolumeProcessTemplate() { - @Override - public Response doProcess(VolumeArgs args) - throws IOException, OzoneException { - UserAuth auth = UserHandlerBuilder.getAuthHandler(); - if (auth.isAdmin(args)) { - StorageHandler fs = StorageHandlerBuilder.getStorageHandler(); - args.setAdminName(args.getUserName()); - String newVolumeOwner = auth.getOzoneUser(args); - - if (newVolumeOwner != null) { - if (!auth.isUser(newVolumeOwner, args)) { - throw ErrorTable.newError(ErrorTable.USER_NOT_FOUND, args); - } - args.setUserName(newVolumeOwner); - fs.setVolumeOwner(args); - } - - if (!quota.equals(Header.OZONE_QUOTA_UNDEFINED)) { - if (quota.equals(Header.OZONE_QUOTA_REMOVE)) { - // if it is remove, just tell the file system to remove quota - fs.setVolumeQuota(args, true); - } else { - setQuotaArgs(args, quota); - fs.setVolumeQuota(args, false); - } - } - return OzoneRestUtils.getResponse(args, HTTP_OK, ""); - } else { - // Only Admins are allowed to update volumes - throw ErrorTable.newError(ErrorTable.ACCESS_DENIED, args); - } - } - }.handleCall(volume, req, uriInfo, headers); - } - - - /** - * Deletes a volume if it is empty. 
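createVolume and updateVolume above treat the quota parameter as a tri-state string: an "undefined" sentinel leaves it unchanged, a "remove" sentinel clears it, and anything else is parsed and applied. A tiny sketch of that dispatch; the sentinel strings are placeholders for the real Header constants:

```java
// Sketch of the removed updateVolume() quota handling; the sentinel values
// are illustrative placeholders, not the actual Ozone header constants.
final class QuotaUpdate {
  static final String QUOTA_UNDEFINED = "undefined";
  static final String QUOTA_REMOVE = "remove";

  private QuotaUpdate() { }

  static String apply(String quota) {
    if (QUOTA_UNDEFINED.equals(quota)) {
      return "leave quota unchanged";
    }
    if (QUOTA_REMOVE.equals(quota)) {
      return "clear the volume quota";
    }
    return "parse and set quota to " + quota;  // may reject malformed input
  }
}
```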
- * - * @param volume Volume Name - * @param req - Http Request - * @param uriInfo - http URI - * @param headers - http headers - * - * @return Standard JAX-RS Response - * - * @throws OzoneException - */ - @Override - public Response deleteVolume(String volume, Request req, UriInfo uriInfo, - HttpHeaders headers) throws OzoneException { - MDC.put(OZONE_FUNCTION, "deleteVolume"); - - return new VolumeProcessTemplate() { - @Override - public Response doProcess(VolumeArgs args) - throws IOException, OzoneException { - UserAuth auth = UserHandlerBuilder.getAuthHandler(); - if (auth.isAdmin(args)) { - StorageHandler fs = StorageHandlerBuilder.getStorageHandler(); - fs.deleteVolume(args); - return OzoneRestUtils.getResponse(args, HTTP_OK, ""); - } else { - throw ErrorTable.newError(ErrorTable.ACCESS_DENIED, args); - } - } - }.handleCall(volume, req, uriInfo, headers); - } - - /** - * Returns Volume info. This API can be invoked either by admin or the owner - * - * @param volume - Storage Volume Name - * @param info - Info attribute - * @param prefix - Prefix key - * @param maxKeys - Max results - * @param prevKey - PrevKey - * @param req - Http Req - * @param uriInfo - UriInfo. - * @param headers - Http headers - * @return - * @throws OzoneException - */ - @Override - public Response getVolumeInfo(String volume, final String info, - final String prefix, - final int maxKeys, - final String prevKey, - final boolean rootScan, - Request req, - final UriInfo uriInfo, HttpHeaders headers) - throws OzoneException { - - return new VolumeProcessTemplate() { - @Override - public Response doProcess(VolumeArgs args) - throws IOException, OzoneException { - - switch (info) { - case Header.OZONE_INFO_QUERY_BUCKET: - MDC.put(OZONE_FUNCTION, "ListBucket"); - return getBucketsInVolume(args, prefix, maxKeys, prevKey); - case Header.OZONE_INFO_QUERY_VOLUME: - MDC.put(OZONE_FUNCTION, "InfoVolume"); - assertNoListParamPresent(uriInfo, args); - return getVolumeInfoResponse(args); // Return volume info - case Header.OZONE_LIST_QUERY_SERVICE: - MDC.put(OZONE_FUNCTION, "ListVolume"); - return getVolumesByUser(args, prefix, maxKeys, prevKey, rootScan); - default: - LOG.debug("Unrecognized query param : {} ", info); - OzoneException ozoneException = - ErrorTable.newError(ErrorTable.INVALID_QUERY_PARAM, args); - ozoneException.setMessage("Unrecognized query param : " + info); - throw ozoneException; - } - } - }.handleCall(volume, req, uriInfo, headers); - } - - /** - * Asserts no list query param is present during this call. - * - * @param uriInfo - UriInfo. - UriInfo - * @param args - Volume Args - VolumeArgs. 
- * @throws OzoneException - */ - private void assertNoListParamPresent(final UriInfo uriInfo, VolumeArgs - args) throws - OzoneException { - - String prefix = uriInfo.getQueryParameters().getFirst("prefix"); - String maxKeys = uriInfo.getQueryParameters().getFirst("max_keys"); - String prevKey = uriInfo.getQueryParameters().getFirst("prev_key"); - if ((prefix != null && !prefix.equals(Header.OZONE_EMPTY_STRING)) || - (maxKeys != null && !maxKeys.equals(Header.OZONE_DEFAULT_LIST_SIZE)) || - (prevKey != null && !prevKey.equals(Header.OZONE_EMPTY_STRING))) { - throw ErrorTable.newError(ErrorTable.INVALID_QUERY_PARAM, args); - } - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java deleted file mode 100644 index fb95bb9a9f5..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java +++ /dev/null @@ -1,276 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.handlers; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Request; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriInfo; -import java.io.IOException; -import java.nio.file.DirectoryNotEmptyException; -import java.nio.file.FileAlreadyExistsException; -import java.nio.file.NoSuchFileException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.OzoneRestUtils; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.web.exceptions.ErrorTable; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.interfaces.UserAuth; -import org.apache.hadoop.ozone.web.response.ListBuckets; -import org.apache.hadoop.ozone.web.response.ListVolumes; -import org.apache.hadoop.ozone.web.response.VolumeInfo; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; - -import static java.net.HttpURLConnection.HTTP_OK; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_COMPONENT; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_REQUEST; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RESOURCE; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_USER; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.slf4j.MDC; - - -/** - * This class abstracts way the repetitive tasks in - * handling volume related code. 
- */ -@InterfaceAudience.Private -public abstract class VolumeProcessTemplate { - private static final Logger LOG = - LoggerFactory.getLogger(VolumeProcessTemplate.class); - - - /** - * The handle call is the common functionality for Volume - * handling code. - * - * @param volume - Name of the Volume - * @param request - request - * @param info - UriInfo - * @param headers - Http Headers - * - * @return Response - * - * @throws OzoneException - */ - public Response handleCall(String volume, Request request, UriInfo info, - HttpHeaders headers) throws OzoneException { - String reqID = OzoneUtils.getRequestID(); - String hostName = OzoneUtils.getHostName(); - MDC.put(OZONE_COMPONENT, "ozone"); - MDC.put(OZONE_REQUEST, reqID); - UserArgs userArgs = null; - try { - userArgs = new UserArgs(reqID, hostName, request, info, headers); - OzoneRestUtils.validate(request, headers, reqID, volume, hostName); - - // we use the same logic for both bucket and volume names - OzoneUtils.verifyResourceName(volume); - UserAuth auth = UserHandlerBuilder.getAuthHandler(); - - userArgs.setUserName(auth.getUser(userArgs)); - MDC.put(OZONE_USER, userArgs.getUserName()); - VolumeArgs args = new VolumeArgs(volume, userArgs); - - MDC.put(OZONE_RESOURCE, args.getResourceName()); - Response response = doProcess(args); - LOG.info("Success"); - MDC.clear(); - return response; - - } catch (IllegalArgumentException ex) { - LOG.error("Illegal argument.", ex); - throw ErrorTable.newError(ErrorTable.INVALID_VOLUME_NAME, userArgs, ex); - } catch (IOException ex) { - handleIOException(volume, reqID, hostName, ex); - } - return null; - } - - /** - * Specific handler for each call. - * - * @param args - Volume Args - * - * @return - Response - * - * @throws IOException - * @throws OzoneException - */ - public abstract Response doProcess(VolumeArgs args) - throws IOException, OzoneException; - - /** - * Maps Java File System Exceptions to Ozone Exceptions in the Volume path. - * - * @param volume - Name of the Volume - * @param reqID - Request ID - * @param hostName - HostName - * @param fsExp - Exception - * - * @throws OzoneException - */ - private void handleIOException(String volume, String reqID, String hostName, - IOException fsExp) throws OzoneException { - LOG.error("IOException:", fsExp); - OzoneException exp = null; - - if ((fsExp != null && fsExp.getMessage().endsWith( - OzoneManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS.name())) - || fsExp instanceof FileAlreadyExistsException) { - exp = ErrorTable - .newError(ErrorTable.VOLUME_ALREADY_EXISTS, reqID, volume, hostName); - } - - if (fsExp instanceof DirectoryNotEmptyException) { - exp = ErrorTable - .newError(ErrorTable.VOLUME_NOT_EMPTY, reqID, volume, hostName); - } - - if (fsExp instanceof NoSuchFileException) { - exp = ErrorTable - .newError(ErrorTable.INVALID_VOLUME_NAME, reqID, volume, hostName); - } - - if ((fsExp != null) && (exp != null)) { - exp.setMessage(fsExp.getMessage()); - } - - // We don't handle that FS error yet, report a Server Internal Error - if (exp == null) { - exp = - ErrorTable.newError(ErrorTable.SERVER_ERROR, reqID, volume, hostName); - if (fsExp != null) { - exp.setMessage(fsExp.getMessage()); - } - } - throw exp; - } - - /** - * Set the user provided string into args and throw ozone exception - * if needed. 
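handleIOException above recovers typed errors by matching the tail of the IOException message against protobuf status names, since the RPC layer flattens server-side errors into message text. A sketch of that technique; the nested Status enum stands in for OzoneManagerProtocolProtos.Status:

```java
import java.io.IOException;

// Sketch of the string-based error recovery in the removed
// VolumeProcessTemplate.handleIOException(): typed server errors arrive
// flattened into exception messages, so the handler matched on the
// status-name suffix. Status is a hypothetical stand-in enum.
final class VolumeErrorMapper {
  enum Status { VOLUME_ALREADY_EXISTS, VOLUME_NOT_EMPTY }

  private VolumeErrorMapper() { }

  static Status classify(IOException e) {
    String msg = e.getMessage();
    if (msg != null) {
      for (Status s : Status.values()) {
        if (msg.endsWith(s.name())) {
          return s;               // recovered the typed error by name
        }
      }
    }
    return null;                  // unmapped -> treated as a server error
  }
}
```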
- * - * @param args - volume args - * @param quota - quota sting - * - * @throws OzoneException - */ - void setQuotaArgs(VolumeArgs args, String quota) throws OzoneException { - try { - args.setQuota(quota); - } catch (IllegalArgumentException ex) { - LOG.debug("Malformed Quota.", ex); - throw ErrorTable.newError(ErrorTable.MALFORMED_QUOTA, args, ex); - } - } - - /** - * Wraps calls into volumeInfo data. - * - * @param args - volumeArgs - * - * @return - VolumeInfo - * - * @throws IOException - * @throws OzoneException - */ - Response getVolumeInfoResponse(VolumeArgs args) - throws IOException, OzoneException { - StorageHandler fs = StorageHandlerBuilder.getStorageHandler(); - VolumeInfo info = fs.getVolumeInfo(args); - return OzoneRestUtils.getResponse(args, HTTP_OK, info.toJsonString()); - } - - /** - * Returns all the volumes belonging to a user. - * - * @param user - userArgs - * @return - Response - * @throws OzoneException - * @throws IOException - */ - Response getVolumesByUser(UserArgs user, String prefix, int maxKeys, - String prevKey, boolean rootScan) throws OzoneException, IOException { - - String validatedUser = user.getUserName(); - try { - UserAuth auth = UserHandlerBuilder.getAuthHandler(); - if(rootScan && !auth.isAdmin(user)) { - throw ErrorTable.newError(ErrorTable.UNAUTHORIZED, user); - } - if (auth.isAdmin(user)) { - validatedUser = auth.getOzoneUser(user); - if (validatedUser == null) { - validatedUser = auth.getUser(user); - } - } - - UserArgs onBehalfOf = - new UserArgs(validatedUser, user.getRequestID(), user.getHostName(), - user.getRequest(), user.getUri(), user.getHeaders()); - - StorageHandler fs = StorageHandlerBuilder.getStorageHandler(); - ListArgs listArgs = new ListArgs<>(onBehalfOf, prefix, - maxKeys, prevKey); - listArgs.setRootScan(rootScan); - ListVolumes volumes = fs.listVolumes(listArgs); - return OzoneRestUtils.getResponse(user, HTTP_OK, volumes.toJsonString()); - } catch (IOException ex) { - LOG.debug("unable to get the volume list for the user.", ex); - OzoneException exp = ErrorTable.newError(ErrorTable.SERVER_ERROR, - user, ex); - exp.setMessage("unable to get the volume list for the user"); - throw exp; - } - } - - /** - * Returns a list of Buckets in a Volume. - * - * @param args - VolumeArgs - * @param prefix - Prefix to Match - * @param maxKeys - Max results to return. - * @param prevKey - PrevKey - * @return List of Buckets - * @throws OzoneException - */ - Response getBucketsInVolume(VolumeArgs args, String prefix, int maxKeys, - String prevKey) throws OzoneException { - try { - // UserAuth auth = UserHandlerBuilder.getAuthHandler(); - // TODO : Check ACLS. 
- StorageHandler fs = StorageHandlerBuilder.getStorageHandler(); - ListArgs listArgs = new ListArgs<>(args, prefix, - maxKeys, prevKey); - ListBuckets bucketList = fs.listBuckets(listArgs); - return OzoneRestUtils - .getResponse(args, HTTP_OK, bucketList.toJsonString()); - } catch (IOException ex) { - LOG.debug("unable to get the bucket list for the specified volume.", ex); - OzoneException exp = - ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex); - exp.setMessage("unable to get the bucket list for the specified volume."); - throw exp; - } - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/package-info.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/package-info.java deleted file mode 100644 index 4d34c2d0131..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.handlers; - -/** - This package contains ozone client side libraries. - */ diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Accounting.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Accounting.java deleted file mode 100644 index 73290b3aeed..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Accounting.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.interfaces; - -/** - * This in the accounting interface, Ozone Rest interface will call into this - * interface whenever a put or delete key happens. - *
- * TODO : Technically we need to report bucket creation and deletion too - * since the bucket names and metadata consume storage. - *
- * TODO : We should separate out reporting metadata & data -- - *
- * In some cases end users will only want to account for the data they are - * storing since metadata is mostly a cost of business. - */ -public interface Accounting { - /** - * This call is made when ever a put key call is made. - *
- * In case of a Put which causes a over write of a key accounting system will - * see two calls, a removeByte call followed by an addByte call. - * - * @param owner - Volume Owner - * @param volume - Name of the Volume - * @param bucket - Name of the bucket - * @param bytes - How many bytes are put - */ - void addBytes(String owner, String volume, String bucket, int bytes); - - /** - * This call is made whenever a delete call is made. - * - * @param owner - Volume Owner - * @param volume - Name of the Volume - * @param bucket - Name of the bucket - * @param bytes - How many bytes are deleted - */ - void removeBytes(String owner, String volume, String bucket, int bytes); - -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Bucket.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Bucket.java deleted file mode 100644 index fe4b1b6280a..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Bucket.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.interfaces; - -import io.swagger.annotations.Api; -import io.swagger.annotations.ApiImplicitParam; -import io.swagger.annotations.ApiImplicitParams; -import io.swagger.annotations.ApiOperation; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.client.rest.headers.Header; - -import javax.ws.rs.DELETE; -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.POST; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Request; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriInfo; - -/** - * Bucket Interface acts as the HTTP entry point for - * bucket related functionality. - */ -@Path("/{volume}/{bucket}") -@Api(tags = "bucket") -public interface Bucket { - /** - * createBucket call handles the POST request for Creating a Bucket. 
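The removed REST endpoints are declared as JAX-RS interfaces like Bucket here, with routing annotations on the interface and the handler classes implementing them. A minimal compilable sketch of that split; BucketResource and its method names are hypothetical:

```java
import javax.ws.rs.DELETE;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Response;

// Minimal shape of the removed JAX-RS entry points: the interface carries
// the routing annotations; a handler class implements it. Illustrative only.
@Path("/{volume}/{bucket}")
public interface BucketResource {

  @POST                        // POST /{volume}/{bucket} creates the bucket
  Response create(@PathParam("volume") String volume,
                  @PathParam("bucket") String bucket,
                  @Context HttpHeaders headers);

  @DELETE                      // DELETE removes it when empty
  Response delete(@PathParam("volume") String volume,
                  @PathParam("bucket") String bucket,
                  @Context HttpHeaders headers);
}
```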
- * - * @param volume - Volume name - * @param bucket - Bucket Name - * @param req - Http request - * @param info - Uri Info - * @param headers - Http headers - * - * @return Response - * - * @throws OzoneException - */ - @POST - @ApiOperation("Create new bucket to a volume") - @ApiImplicitParams({ - @ApiImplicitParam(name = "x-ozone-version", example = "v1", required = - true, paramType = "header"), - @ApiImplicitParam(name = "x-ozone-user", example = "user", required = - true, paramType = "header"), - @ApiImplicitParam(name = "Date", example = "Date: Mon, 26 Jun 2017 " - + "04:23:30 GMT", required = true, paramType = "header"), - @ApiImplicitParam(name = "Authorization", example = "OZONE", required = - true, paramType = "header")}) - Response createBucket(@PathParam("volume") String volume, - @PathParam("bucket") String bucket, - @Context Request req, @Context UriInfo info, - @Context HttpHeaders headers) throws OzoneException; - - /** - * updateBucket call handles the PUT request for updating a Bucket. - * - * @param volume - Volume name - * @param bucket - Bucket name - * @param req - Http request - * @param info - Uri Info - * @param headers - Http headers - * - * @return Response - * - * @throws OzoneException - */ - @PUT - @ApiOperation("Update bucket") - @ApiImplicitParams({ - @ApiImplicitParam(name = "x-ozone-version", example = "v1", required = - true, paramType = "header"), - @ApiImplicitParam(name = "x-ozone-user", example = "user", required = - true, paramType = "header"), - @ApiImplicitParam(name = "Date", example = "Date: Mon, 26 Jun 2017 " - + "04:23:30 GMT", required = true, paramType = "header"), - @ApiImplicitParam(name = "Authorization", example = "OZONE", required = - true, paramType = "header")}) - Response updateBucket(@PathParam("volume") String volume, - @PathParam("bucket") String bucket, - @Context Request req, @Context UriInfo info, - @Context HttpHeaders headers) throws OzoneException; - - /** - * Deletes an empty bucket. - * - * @param volume Volume name - * @param bucket Bucket Name - * @param req - Http request - * @param info - Uri Info - * @param headers - Http headers - * - * @return Response - * - * @throws OzoneException - */ - @DELETE - @ApiOperation("Deletes an empty bucket.") - @ApiImplicitParams({ - @ApiImplicitParam(name = "x-ozone-version", example = "v1", required = - true, paramType = "header"), - @ApiImplicitParam(name = "x-ozone-user", example = "user", required = - true, paramType = "header"), - @ApiImplicitParam(name = "Date", example = "Date: Mon, 26 Jun 2017 " - + "04:23:30 GMT", required = true, paramType = "header"), - @ApiImplicitParam(name = "Authorization", example = "OZONE", required = - true, paramType = "header")}) - Response deleteBucket(@PathParam("volume") String volume, - @PathParam("bucket") String bucket, - @Context Request req, @Context UriInfo info, - @Context HttpHeaders headers) throws OzoneException; - - /** - * List Buckets lists the contents of a bucket. 
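All of the bucket operations above share the same four required headers. As an illustration only (host, port, and the volume/bucket names below are placeholders, not taken from the patch), a raw client call to the createBucket endpoint might look like:

```java
import java.net.HttpURLConnection;
import java.net.URL;

public final class CreateBucketCall {
  public static void main(String[] args) throws Exception {
    // POST /{volume}/{bucket} creates a bucket; the address is assumed.
    URL url = new URL("http://datanode.example.com:9880/vol1/bucket1");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("POST");
    // The four headers marked required in the annotations above.
    conn.setRequestProperty("x-ozone-version", "v1");
    conn.setRequestProperty("x-ozone-user", "user");
    conn.setRequestProperty("Date", "Mon, 26 Jun 2017 04:23:30 GMT");
    conn.setRequestProperty("Authorization", "OZONE");
    System.out.println("HTTP status: " + conn.getResponseCode());
    conn.disconnect();
  }
}
```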
- * - * @param volume - Storage Volume Name - * @param bucket - Bucket Name - * @param info - Information type needed - * @param prefix - Prefix for the keys to be fetched - * @param maxKeys - MaxNumber of Keys to Return - * @param prevKey - Continuation Token - * @param req - Http request - * @param headers - Http headers - * - * @return - Json Body - * - * @throws OzoneException - */ - - @GET - @Produces(MediaType.APPLICATION_JSON) - @ApiOperation("List contents of a bucket") - @ApiImplicitParams({ - @ApiImplicitParam(name = "x-ozone-version", example = "v1", required = - true, paramType = "header"), - @ApiImplicitParam(name = "x-ozone-user", example = "user", required = - true, paramType = "header"), - @ApiImplicitParam(name = "Date", example = "Date: Mon, 26 Jun 2017 " - + "04:23:30 GMT", required = true, paramType = "header"), - @ApiImplicitParam(name = "Authorization", example = "OZONE", required = - true, paramType = "header")}) - @SuppressWarnings("parameternumber") - Response listBucket(@PathParam("volume") String volume, - @PathParam("bucket") String bucket, - @DefaultValue(Header.OZONE_INFO_QUERY_KEY) - @QueryParam(Header.OZONE_INFO_QUERY_TAG) - String info, - @QueryParam(Header.OZONE_LIST_QUERY_PREFIX) - String prefix, - @DefaultValue(Header.OZONE_DEFAULT_LIST_SIZE) - @QueryParam(Header.OZONE_LIST_QUERY_MAXKEYS) - int maxKeys, - @QueryParam(Header.OZONE_LIST_QUERY_PREVKEY) - String prevKey, - @Context Request req, @Context UriInfo uriInfo, - @Context HttpHeaders headers) throws OzoneException; - - -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java deleted file mode 100644 index 1ce81c244bd..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.web.interfaces; - -import io.swagger.annotations.Api; -import io.swagger.annotations.ApiImplicitParam; -import io.swagger.annotations.ApiImplicitParams; -import io.swagger.annotations.ApiOperation; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.client.rest.headers.Header; - -import javax.ws.rs.Consumes; -import javax.ws.rs.DELETE; -import javax.ws.rs.GET; -import javax.ws.rs.PUT; -import javax.ws.rs.POST; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Request; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriInfo; -import java.io.InputStream; - -/** - * This interface defines operations permitted on a key. - */ -@Path("/{volume}/{bucket}/{keys:.*}") -@Api(tags = "key") -public interface Keys { - - /** - * Adds a key to an existing bucket. If the object already exists - * this call will overwrite or add with new version number if the bucket - * versioning is turned on. - * - * @param volume Storage Volume Name - * @param bucket Name of the bucket - * @param keys Name of the Object - * @param is InputStream or File Data - * @param req Request - * @param headers http headers - * - * @return Response - * - * @throws OzoneException - */ - @PUT - @Consumes(MediaType.WILDCARD) - @ApiOperation(value = "Adds a key to an existing bucket.", notes = "If the " - + "object already exists this call will overwrite or add with new version" - + " number if the bucket versioning is turned on.") - @ApiImplicitParams({ - @ApiImplicitParam(name = "x-ozone-version", example = "v1", required = - true, paramType = "header"), - @ApiImplicitParam(name = "x-ozone-user", example = "user", required = - true, paramType = "header"), - @ApiImplicitParam(name = "Date", example = "Date: Mon, 26 Jun 2017 " - + "04:23:30 GMT", required = true, paramType = "header"), - @ApiImplicitParam(name = "Authorization", example = "OZONE", required = - true, paramType = "header")}) - Response putKey(@PathParam("volume") String volume, - @PathParam("bucket") String bucket, @PathParam("keys") String keys, - InputStream is, @Context Request req, @Context UriInfo info, - @Context HttpHeaders headers) throws OzoneException; - - /** - * Gets the Key if it exists. - * - * @param volume Storage Volume - * @param bucket Name of the bucket - * @param keys Object Name - * @param info Tag info - * @param req Request - * @param uriInfo Uri info - * @param headers Http Header - * - * @return Response - * - * @throws OzoneException - */ - @GET - @ApiOperation("Gets the Key if it exists.") - @ApiImplicitParams({ - @ApiImplicitParam(name = "x-ozone-version", example = "v1", required = - true, paramType = "header"), - @ApiImplicitParam(name = "x-ozone-user", example = "user", required = - true, paramType = "header"), - @ApiImplicitParam(name = "Date", example = "Date: Mon, 26 Jun 2017 " - + "04:23:30 GMT", required = true, paramType = "header"), - @ApiImplicitParam(name = "Authorization", example = "OZONE", required = - true, paramType = "header")}) - Response getKey(@PathParam("volume") String volume, - @PathParam("bucket") String bucket, @PathParam("keys") String keys, - @QueryParam(Header.OZONE_INFO_QUERY_TAG) String info, - @Context Request req, @Context UriInfo uriInfo, - @Context HttpHeaders headers) throws OzoneException; - - /** - * Deletes an existing key. 
- * - * @param volume Storage Volume Name - * @param bucket Name of the bucket - * @param keys Name of the Object - * @param req http Request - * @param headers HttpHeaders - * - * @return Response - * - * @throws OzoneException - */ - @DELETE - @ApiOperation("Deletes an existing key") - @ApiImplicitParams({ - @ApiImplicitParam(name = "x-ozone-version", example = "v1", required = - true, paramType = "header"), - @ApiImplicitParam(name = "x-ozone-user", example = "user", required = - true, paramType = "header"), - @ApiImplicitParam(name = "Date", example = "Date: Mon, 26 Jun 2017 " - + "04:23:30 GMT", required = true, paramType = "header"), - @ApiImplicitParam(name = "Authorization", example = "OZONE", required = - true, paramType = "header")}) - Response deleteKey(@PathParam("volume") String volume, - @PathParam("bucket") String bucket, @PathParam("keys") String keys, - @Context Request req, @Context UriInfo info, @Context HttpHeaders headers) - throws OzoneException; - - /** - * Renames an existing key within a bucket. - * - * @param volume Storage Volume Name - * @param bucket Name of the bucket - * @param keys Name of the Object - * @param req http Request - * @param headers HttpHeaders - * - * @return Response - * - * @throws OzoneException - */ - @POST - @ApiOperation("Renames an existing key within a bucket") - @ApiImplicitParams({ - @ApiImplicitParam(name = "x-ozone-version", example = "v1", required = - true, paramType = "header"), - @ApiImplicitParam(name = "x-ozone-user", example = "user", required = - true, paramType = "header"), - @ApiImplicitParam(name = "Date", example = "Date: Mon, 26 Jun 2017 " - + "04:23:30 GMT", required = true, paramType = "header"), - @ApiImplicitParam(name = "Authorization", example = "OZONE", required = - true, paramType = "header")}) - Response renameKey(@PathParam("volume") String volume, - @PathParam("bucket") String bucket, @PathParam("keys") String keys, - @QueryParam(Header.OZONE_RENAME_TO_KEY_PARAM_NAME) String toKeyName, - @Context Request req, @Context UriInfo info, @Context HttpHeaders headers) - throws OzoneException; -} - diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java deleted file mode 100644 index 91481d227b7..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java +++ /dev/null @@ -1,308 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.web.interfaces; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.client.io.LengthInputStream; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.web.handlers.BucketArgs; -import org.apache.hadoop.ozone.web.handlers.KeyArgs; -import org.apache.hadoop.ozone.web.handlers.ListArgs; -import org.apache.hadoop.ozone.web.handlers.VolumeArgs; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.web.response.BucketInfo; -import org.apache.hadoop.ozone.web.response.KeyInfo; -import org.apache.hadoop.ozone.web.response.ListBuckets; -import org.apache.hadoop.ozone.web.response.ListKeys; -import org.apache.hadoop.ozone.web.response.ListVolumes; -import org.apache.hadoop.ozone.web.response.VolumeInfo; - -import java.io.Closeable; -import java.io.IOException; -import java.io.OutputStream; - -/** - * Storage handler Interface is the Interface between - * REST protocol and file system. - * - * We will have two default implementations of this interface. - * One for the local file system that is handy while testing - * and another which will point to the HDFS backend. - */ -@InterfaceAudience.Private -public interface StorageHandler extends Closeable{ - - /** - * Creates a Storage Volume. - * - * @param args - Volume Name - * - * @throws IOException - * @throws OzoneException - */ - void createVolume(VolumeArgs args) throws IOException, OzoneException; - - - /** - * setVolumeOwner - sets the owner of the volume. - * - * @param args owner info is present in the args - * - * @throws IOException - * @throws OzoneException - */ - void setVolumeOwner(VolumeArgs args) throws IOException, OzoneException; - - - /** - * Set Volume Quota. - * - * @param args - Has Quota info - * @param remove - true if the request is to remove the quota - * - * @throws IOException - * @throws OzoneException - */ - void setVolumeQuota(VolumeArgs args, boolean remove) - throws IOException, OzoneException; - - /** - * Checks if a Volume exists and the user with a role specified has access - * to the Volume. - * - * @param volume - Volume Name whose access permissions needs to be checked - * @param acl - requested acls which needs to be checked for access - * - * @return - Boolean - True if the user with a role can access the volume. - * This is possible for owners of the volume and admin users - * - * @throws IOException - * @throws OzoneException - */ - boolean checkVolumeAccess(String volume, OzoneAcl acl) - throws IOException, OzoneException; - - - /** - * Returns the List of Volumes owned by the specific user. - * - * @param args - ListArgs - * - * @return - List of Volumes - * - * @throws IOException - * @throws OzoneException - */ - ListVolumes listVolumes(ListArgs args) throws IOException, OzoneException; - - /** - * Deletes an Empty Volume. - * - * @param args - Volume Args - * - * @throws IOException - * @throws OzoneException - */ - void deleteVolume(VolumeArgs args) throws IOException, OzoneException; - - - /** - * Returns Info about the specified Volume. - * - * @param args - Volume Args - * - * @return VolumeInfo - * - * @throws IOException - * @throws OzoneException - */ - VolumeInfo getVolumeInfo(VolumeArgs args) throws IOException, OzoneException; - - /** - * Creates a Bucket in specified Volume. 
- * - * @param args BucketArgs- BucketName, UserName and Acls - * - * @throws IOException - */ - void createBucket(BucketArgs args) throws IOException, OzoneException; - - /** - * Enables or disables Bucket Versioning. - * - * @param args - BucketArgs - * - * @throws IOException - */ - void setBucketVersioning(BucketArgs args) throws IOException, OzoneException; - - /** - * Sets the Storage Class of a Bucket. - * - * @param args - BucketArgs - * - * @throws IOException - */ - void setBucketStorageClass(BucketArgs args) - throws IOException, OzoneException; - - /** - * Deletes a bucket if it is empty. - * - * @param args Bucket args structure - * - * @throws IOException - */ - void deleteBucket(BucketArgs args) throws IOException, OzoneException; - - /** - * true if the bucket exists and user has read access - * to the bucket else throws Exception. - * - * @param args Bucket args structure - * - * @throws IOException - */ - void checkBucketAccess(BucketArgs args) throws IOException, OzoneException; - - - /** - * Returns all Buckets of a specified Volume. - * - * @param listArgs -- List Args. - * - * @return ListAllBuckets - * - * @throws OzoneException - */ - ListBuckets listBuckets(ListArgs listArgs) throws - IOException, OzoneException; - - - /** - * Returns Bucket's Metadata as a String. - * - * @param args Bucket args structure - * - * @return Info about the bucket - * - * @throws IOException - */ - BucketInfo getBucketInfo(BucketArgs args) throws IOException, OzoneException; - - /** - * Writes a key in an existing bucket. - * - * @param args KeyArgs - * - * @return InputStream - * - * @throws OzoneException - */ - OutputStream newKeyWriter(KeyArgs args) - throws IOException, OzoneException; - - - /** - * Tells the file system that the object has been written out - * completely and it can do any house keeping operation that needs - * to be done. - * - * @param args Key Args - * - * @param stream - * @throws IOException - */ - void commitKey(KeyArgs args, OutputStream stream) - throws IOException, OzoneException; - - - /** - * Reads a key from an existing bucket. - * - * @param args KeyArgs - * - * @return LengthInputStream - * - * @throws IOException - */ - LengthInputStream newKeyReader(KeyArgs args) - throws IOException, OzoneException; - - - /** - * Deletes an existing key. - * - * @param args KeyArgs - * - * @throws OzoneException - */ - void deleteKey(KeyArgs args) throws IOException, OzoneException; - - /** - * Renames an existing key within a bucket. - * - * @param args KeyArgs - * @param toKeyName New name to be used for the key - * @throws OzoneException - */ - void renameKey(KeyArgs args, String toKeyName) - throws IOException, OzoneException; - - /** - * Returns a list of Key. - * - * @param args KeyArgs - * - * @return BucketList - * - * @throws IOException - */ - ListKeys listKeys(ListArgs args) throws IOException, OzoneException; - - /** - * Get information of the specified Key. - * - * @param args Key Args - * - * @return KeyInfo - * - * @throws IOException - * @throws OzoneException - */ - KeyInfo getKeyInfo(KeyArgs args) throws IOException, OzoneException; - - /** - * Get detail information of the specified Key. - * - * @param args Key Args - * - * @return KeyInfo - * - * @throws IOException - * @throws OzoneException - */ - KeyInfo getKeyInfoDetails(KeyArgs args) throws IOException, OzoneException; - - /** - * Closes all the opened resources. 
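Taken together, the key methods above define a two-step write path: newKeyWriter hands back a stream, and commitKey finalizes it. A minimal sketch, assuming the KeyArgs instance is already built (its construction is not shown here) and that the class sits where StorageHandler and KeyArgs are visible:

```java
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

final class WriteKeySketch {
  static void writeKey(StorageHandler handler, KeyArgs keyArgs)
      throws Exception {
    OutputStream out = handler.newKeyWriter(keyArgs);
    out.write("hello ozone".getBytes(StandardCharsets.UTF_8));
    // commitKey signals that the object is completely written and lets the
    // handler run its housekeeping for the new key.
    handler.commitKey(keyArgs, out);
  }
}
```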
- */ - @Override - void close(); -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/UserAuth.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/UserAuth.java deleted file mode 100644 index a1d2e7c193e..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/UserAuth.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.interfaces; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.web.handlers.UserArgs; - -/** - * This interface is used by Ozone to determine user identity. - * - * Please see concrete implementations for more information - */ -@InterfaceAudience.Private -public interface UserAuth { - /** - * Returns the user name as a string from the URI and HTTP headers. - * - * @param userArgs - userArgs - * - * @return String - User name - * - * @throws OzoneException - */ - String getUser(UserArgs userArgs) throws OzoneException; - - /** - * Returns all the Groups that user is a member of. - * - * @param userArgs - userArgs - * - * @return Array of Groups - * - * @throws OzoneException - */ - String[] getGroups(UserArgs userArgs) throws OzoneException; - - /** - * Returns true if a user is a Admin. - * - * @param userArgs - userArgs - * - * @return true if Admin , false otherwise - * - * @throws OzoneException -- Allows the underlying system - * to throw, that error will get propagated to clients - */ - boolean isAdmin(UserArgs userArgs) throws OzoneException; - - /** - * Returns true if the request is Anonymous. - * - * @param userArgs - userArgs - * - * @return true if the request is anonymous, false otherwise. - * - * @throws OzoneException - Will be propagated back to end user - */ - boolean isAnonymous(UserArgs userArgs) throws OzoneException; - - /** - * Returns true if the name is a recognizable user in the system. - * - * @param userName - User Name to check - * @param userArgs - userArgs - * - * @return true if the username string is the name of a valid user. - * - * @throws OzoneException - Will be propagated back to end user - */ - boolean isUser(String userName, UserArgs userArgs) throws OzoneException; - - /** - * Returns the x-ozone-user or the user on behalf of, This is - * used in Volume creation path. - * - * @param userArgs - userArgs - * - * @return a user name if it has x-ozone-user args in header. 
- * - * @throws OzoneException - */ - String getOzoneUser(UserArgs userArgs) throws OzoneException; - -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Volume.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Volume.java deleted file mode 100644 index efb6f68cdd2..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Volume.java +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.interfaces; - -import io.swagger.annotations.Api; -import io.swagger.annotations.ApiImplicitParam; -import io.swagger.annotations.ApiImplicitParams; -import io.swagger.annotations.ApiOperation; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.client.rest.headers.Header; - -import javax.ws.rs.DELETE; -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.POST; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Request; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriInfo; - -/** - * Volume Interface acts as the HTTP entry point for - * volume related functionality. - */ -@InterfaceAudience.Private -@Path("/{volume}") -@Api(tags = "volume") -public interface Volume { - - /** - * Creates a Volume owned by the user. - * - * Params : - * Quota - Specifies the Maximum usable size by the user - * the valid parameters for quota are {@literal ()} - * | remove. For example 10GB or "remove". 
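The quota parameter accepts either a size string or the literal "remove". Its exact grammar is not spelled out here, so the following parser is a hedged sketch with an assumed unit table (MB/GB/TB), purely for illustration:

```java
import java.util.Locale;

final class QuotaParamSketch {
  /** Returns the quota in bytes, or -1 as a "remove the quota" sentinel. */
  static long parseBytes(String quota) {
    String q = quota.trim().toUpperCase(Locale.ROOT);
    if ("REMOVE".equals(q)) {
      return -1L;
    }
    String[] units = {"TB", "GB", "MB"};             // assumed unit set
    long[] factors = {1L << 40, 1L << 30, 1L << 20};
    for (int i = 0; i < units.length; i++) {
      if (q.endsWith(units[i])) {
        String number = q.substring(0, q.length() - units[i].length()).trim();
        return Long.parseLong(number) * factors[i];
      }
    }
    throw new IllegalArgumentException("Unrecognized quota: " + quota);
  }
}
```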
- * - * @param volume Volume Name, this has to be unique at Ozone Level - * @param quota Quota for this Storage Volume - {@literal ()} - * | remove - * @param req - Request Object - Request Object - * @param uriInfo - Http UriInfo - * @param headers Http Headers HttpHeaders - * - * @return Response - * - * @throws OzoneException - */ - - @POST - @ApiOperation("Creates a Volume owned by the user") - @ApiImplicitParams({ - @ApiImplicitParam(name = "x-ozone-version", example = "v1", required = - true, paramType = "header"), - @ApiImplicitParam(name = "x-ozone-user", example = "user", required = - true, paramType = "header"), - @ApiImplicitParam(name = "Date", example = "Date: Mon, 26 Jun 2017 " - + "04:23:30 GMT", required = true, paramType = "header"), - @ApiImplicitParam(name = "Authorization", example = "OZONE", required = - true, paramType = "header")}) - Response createVolume(@PathParam("volume") String volume, - @DefaultValue(Header.OZONE_QUOTA_UNDEFINED) - @QueryParam(Header.OZONE_QUOTA_QUERY_TAG) String quota, - @Context Request req, @Context UriInfo uriInfo, - @Context HttpHeaders headers) throws OzoneException; - - /** - * Updates a Volume owned by the user. - * - * Params : - * Owner - Specifies the name of the owner - * Quota - Specifies the Maximum usable size by the user - * the valid parameters for quota are {@literal ()} | remove. - * For example 10GB or "remove". - * - * @param volume Volume Name, this has to be unique at Ozone Level - * @param quota Quota for this Storage Volume - {@literal ()} - * | remove - * @param req - Request Object - Request Object - * @param headers Http Headers HttpHeaders - * - * @return Response - * - * @throws OzoneException - */ - @PUT - @ApiOperation("Updates a Volume owned by the user") - @ApiImplicitParams({ - @ApiImplicitParam(name = "x-ozone-version", example = "v1", required = - true, paramType = "header"), - @ApiImplicitParam(name = "x-ozone-user", example = "user", required = - true, paramType = "header"), - @ApiImplicitParam(name = "Date", example = "Date: Mon, 26 Jun 2017 " - + "04:23:30 GMT", required = true, paramType = "header"), - @ApiImplicitParam(name = "Authorization", example = "OZONE", required = - true, paramType = "header")}) - Response updateVolume(@PathParam("volume") String volume, - @DefaultValue(Header.OZONE_QUOTA_UNDEFINED) - @QueryParam(Header.OZONE_QUOTA_QUERY_TAG) String quota, - @Context Request req, @Context UriInfo uriInfo, - @Context HttpHeaders headers) throws OzoneException; - - /** - * Deletes a Volume if it is empty. - * - * @param volume Storage Volume Name - * - * @return Response Response - * - * @throws OzoneException - */ - @DELETE - @ApiOperation("Deletes a Volume if it is empty") - @ApiImplicitParams({ - @ApiImplicitParam(name = "x-ozone-version", example = "v1", required = - true, paramType = "header"), - @ApiImplicitParam(name = "x-ozone-user", example = "user", required = - true, paramType = "header"), - @ApiImplicitParam(name = "Date", example = "Date: Mon, 26 Jun 2017 " - + "04:23:30 GMT", required = true, paramType = "header"), - @ApiImplicitParam(name = "Authorization", example = "OZONE", required = - true, paramType = "header")}) - Response deleteVolume(@PathParam("volume") String volume, - @Context Request req, @Context UriInfo uriInfo, - @Context HttpHeaders headers) throws OzoneException; - - /** - * Returns Volume info. 
This API can be invoked either - * by admin or the owner - * - * @param volume - Storage Volume Name - * @param req - Http Req - * @param headers - Http headers - * - * @return - Response - * - * @throws OzoneException - */ - @GET - @ApiOperation(value = "Returns Volume info", notes = "This API can be " - + "invoked either by admin or the owner") - @ApiImplicitParams({ - @ApiImplicitParam(name = "x-ozone-version", example = "v1", required = - true, paramType = "header"), - @ApiImplicitParam(name = "x-ozone-user", example = "user", required = - true, paramType = "header"), - @ApiImplicitParam(name = "Date", example = "Date: Mon, 26 Jun 2017 " - + "04:23:30 GMT", required = true, paramType = "header"), - @ApiImplicitParam(name = "Authorization", example = "OZONE", required = - true, paramType = "header")}) - @SuppressWarnings("parameternumber") - Response getVolumeInfo(@PathParam("volume") String volume, - @DefaultValue(Header.OZONE_INFO_QUERY_BUCKET) - @QueryParam(Header.OZONE_INFO_QUERY_TAG) String info, - @QueryParam(Header.OZONE_LIST_QUERY_PREFIX) String prefix, - @DefaultValue(Header.OZONE_DEFAULT_LIST_SIZE) - @QueryParam(Header.OZONE_LIST_QUERY_MAXKEYS) int keys, - @QueryParam(Header.OZONE_LIST_QUERY_PREVKEY) String prevKey, - @QueryParam(Header.OZONE_LIST_QUERY_ROOTSCAN) boolean rootScan, - @Context Request req, @Context UriInfo uriInfo, - @Context HttpHeaders headers) throws OzoneException; - -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/package-info.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/package-info.java deleted file mode 100644 index 940f1799e3c..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.interfaces; - -/** - This package contains ozone client side libraries. - */ diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/localstorage/package-info.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/localstorage/package-info.java deleted file mode 100644 index 6bf66433ea6..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/localstorage/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.localstorage; \ No newline at end of file diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/messages/LengthInputStreamMessageBodyWriter.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/messages/LengthInputStreamMessageBodyWriter.java deleted file mode 100644 index 6db49d5428d..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/messages/LengthInputStreamMessageBodyWriter.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.messages; - -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.client.io.LengthInputStream; - -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.ext.MessageBodyWriter; -import java.io.IOException; -import java.io.OutputStream; -import java.lang.annotation.Annotation; -import java.lang.reflect.Type; - -/** - * Writes outbound HTTP response object bytes. The content length is determined - * from the {@link LengthInputStream}. 
- */ -public final class LengthInputStreamMessageBodyWriter - implements MessageBodyWriter { - private static final int CHUNK_SIZE = 8192; - - @Override - public long getSize(LengthInputStream lis, Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { - return lis.getLength(); - } - - @Override - public boolean isWriteable(Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { - return LengthInputStream.class.isAssignableFrom(type); - } - - @Override - public void writeTo(LengthInputStream lis, Class type, Type genericType, - Annotation[] annotations, MediaType mediaType, - MultivaluedMap httpHeaders, - OutputStream out) throws IOException { - IOUtils.copyBytes(lis, out, CHUNK_SIZE); - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/messages/StringMessageBodyWriter.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/messages/StringMessageBodyWriter.java deleted file mode 100644 index ad637af9dd9..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/messages/StringMessageBodyWriter.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.messages; - -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; - -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.ext.MessageBodyWriter; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.lang.annotation.Annotation; -import java.lang.reflect.Type; - -/** - * Writes outbound HTTP response strings. We use this rather than the built-in - * writer so that we can determine content length from the string length instead - * of possibly falling back to a chunked response. 
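One subtlety in the String writer that follows: getSize reports str.length(), which counts UTF-16 code units, while writeTo emits encoded bytes, and the two diverge for non-ASCII content. A byte-accurate variant (a sketch, assuming the encoding is UTF-8) would measure the encoded form instead:

```java
import java.nio.charset.StandardCharsets;

final class ByteLengthSketch {
  // Content-Length should reflect encoded bytes, not character count.
  static long encodedLength(String str) {
    return str.getBytes(StandardCharsets.UTF_8).length; // assumes UTF-8
  }
}
```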
- */ -public final class StringMessageBodyWriter implements - MessageBodyWriter { - private static final int CHUNK_SIZE = 8192; - - @Override - public long getSize(String str, Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { - return str.length(); - } - - @Override - public boolean isWriteable(Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { - return String.class.isAssignableFrom(type); - } - - @Override - public void writeTo(String str, Class type, Type genericType, - Annotation[] annotations, MediaType mediaType, - MultivaluedMap httpHeaders, - OutputStream out) throws IOException { - IOUtils.copyBytes(new ByteArrayInputStream( - str.getBytes(OzoneUtils.ENCODING)), out, CHUNK_SIZE); - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/messages/package-info.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/messages/package-info.java deleted file mode 100644 index 273b3f59227..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/messages/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.messages; \ No newline at end of file diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/CloseableCleanupListener.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/CloseableCleanupListener.java deleted file mode 100644 index 3d9db207534..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/CloseableCleanupListener.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.web.netty; - -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; -import org.apache.hadoop.io.IOUtils; - -import java.io.Closeable; - -/** - * A {@link ChannelFutureListener} that closes {@link Closeable} resources. - */ -final class CloseableCleanupListener implements ChannelFutureListener { - - private final Closeable[] closeables; - - /** - * Creates a new CloseableCleanupListener. - * - * @param closeables any number of closeable resources - */ - CloseableCleanupListener(Closeable... closeables) { - this.closeables = closeables; - } - - @Override - public void operationComplete(ChannelFuture future) { - IOUtils.cleanupWithLogger(null, closeables); - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreChannelHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreChannelHandler.java deleted file mode 100644 index 89c196cf506..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreChannelHandler.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.netty; - -import io.netty.channel.ChannelFutureListener; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.SimpleChannelInboundHandler; -import io.netty.handler.codec.http.DefaultFullHttpResponse; -import io.netty.handler.codec.http.HttpResponse; -import io.netty.handler.codec.http.HttpResponseStatus; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION; -import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_LENGTH; -import static io.netty.handler.codec.http.HttpHeaders.Values.CLOSE; -import static io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR; -import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; - -/** - * Abstract base class for the multiple Netty channel handlers used in the - * Object Store Netty channel pipeline. - */ -abstract class ObjectStoreChannelHandler - extends SimpleChannelInboundHandler { - - /** Log usable in all subclasses. */ - protected static final Logger LOG = - LoggerFactory.getLogger(ObjectStoreChannelHandler.class); - - /** - * Handles uncaught exceptions in the channel pipeline by sending an internal - * server error response if the channel is still active. 
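The CloseableCleanupListener above is meant to be attached to a Netty write future so that piped streams are released whether the write succeeds or fails. A usage sketch (the surrounding class and stream variables are hypothetical, and it must live in the same package since the listener is package-private):

```java
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import java.io.Closeable;

final class CleanupUsageSketch {
  static void writeThenCleanup(ChannelHandlerContext ctx, Object response,
      Closeable requestIn, Closeable responseOut) {
    ChannelFuture future = ctx.writeAndFlush(response);
    // Streams are closed in operationComplete, regardless of outcome.
    future.addListener(new CloseableCleanupListener(requestIn, responseOut));
  }
}
```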
- * - * @param ctx ChannelHandlerContext to receive response - * @param cause Throwable that was unhandled in the channel pipeline - */ - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { - LOG.error("Unexpected exception in Netty pipeline.", cause); - if (ctx.channel().isActive()) { - sendErrorResponse(ctx, INTERNAL_SERVER_ERROR); - } - } - - /** - * Sends an error response. This method is used when an unexpected error is - * encountered within the channel pipeline, outside of the actual Object Store - * application. It always closes the connection, because we can't in general - * know the state of the connection when these errors occur, so attempting to - * keep the connection alive could be unpredictable. - * - * @param ctx ChannelHandlerContext to receive response - * @param status HTTP response status - */ - protected static void sendErrorResponse(ChannelHandlerContext ctx, - HttpResponseStatus status) { - HttpResponse nettyResp = new DefaultFullHttpResponse(HTTP_1_1, status); - nettyResp.headers().set(CONTENT_LENGTH, 0); - nettyResp.headers().set(CONNECTION, CLOSE); - ctx.writeAndFlush(nettyResp).addListener(ChannelFutureListener.CLOSE); - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreJerseyContainer.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreJerseyContainer.java deleted file mode 100644 index c7b516f9f1a..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreJerseyContainer.java +++ /dev/null @@ -1,348 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.web.netty; - -import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_LENGTH; -import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION; -import static io.netty.handler.codec.http.HttpHeaders.Names.TRANSFER_ENCODING; -import static io.netty.handler.codec.http.HttpHeaders.Names.HOST; -import static io.netty.handler.codec.http.HttpHeaders.Values.KEEP_ALIVE; -import static io.netty.handler.codec.http.HttpHeaders.Values.CLOSE; -import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CancellationException; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.TimeUnit; - -import com.sun.jersey.core.header.InBoundHeaders; -import com.sun.jersey.spi.container.ContainerRequest; -import com.sun.jersey.spi.container.ContainerResponse; -import com.sun.jersey.spi.container.ContainerResponseWriter; -import com.sun.jersey.spi.container.WebApplication; - -import io.netty.handler.codec.http.DefaultHttpResponse; -//import io.netty.handler.codec.http.HttpUtil; -import io.netty.handler.codec.http.HttpHeaders; -import io.netty.handler.codec.http.HttpRequest; -import io.netty.handler.codec.http.HttpResponse; -import io.netty.handler.codec.http.HttpResponseStatus; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.handlers.StorageHandlerBuilder; - -/** - * This is a custom Jersey container that hosts the Object Store web - * application. It supports dispatching an inbound Netty {@link HttpRequest} - * to the Object Store Jersey application. Request dispatching must run - * asynchronously, because the Jersey application must consume the inbound - * HTTP request from a piped stream and produce the outbound HTTP response - * for another piped stream.The Netty channel handlers consume the connected - * ends of these piped streams. Request dispatching cannot run directly on - * the Netty threads, or there would be a risk of deadlock (one thread - * producing/consuming its end of the pipe while no other thread is - * producing/consuming the opposite end). - */ -public final class ObjectStoreJerseyContainer { - - private static final Logger LOG = - LoggerFactory.getLogger(ObjectStoreJerseyContainer.class); - - private final WebApplication webapp; - - private StorageHandler storageHandler; - - /** - * Creates a new ObjectStoreJerseyContainer. - * - * @param webapp web application - */ - public ObjectStoreJerseyContainer(WebApplication webapp) { - this.webapp = webapp; - } - - /** - * Sets the {@link StorageHandler}. This must be called before dispatching any - * requests. - * - * @param newStorageHandler {@link StorageHandler} implementation - */ - public void setStorageHandler(StorageHandler newStorageHandler) { - this.storageHandler = newStorageHandler; - } - - /** - * Asynchronously executes an HTTP request. 
- * - * @param nettyReq HTTP request - * @param reqIn input stream for reading request body - * @param respOut output stream for writing response body - */ - public Future dispatch(HttpRequest nettyReq, InputStream reqIn, - OutputStream respOut) { - // The request executes on a separate background thread. As soon as enough - // processing has completed to bootstrap the outbound response, the thread - // counts down on a latch. This latch also unblocks callers trying to get - // the asynchronous response out of the returned future. - final CountDownLatch latch = new CountDownLatch(1); - final RequestRunner runner = new RequestRunner(nettyReq, reqIn, respOut, - latch); - final Thread thread = new Thread(runner); - thread.setDaemon(true); - thread.start(); - return new Future() { - - private volatile boolean isCancelled = false; - - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - if (latch.getCount() == 0) { - return false; - } - if (!mayInterruptIfRunning) { - return false; - } - if (!thread.isAlive()) { - return false; - } - thread.interrupt(); - try { - thread.join(); - } catch (InterruptedException e) { - LOG.info("Interrupted while attempting to cancel dispatch thread."); - Thread.currentThread().interrupt(); - return false; - } - isCancelled = true; - return true; - } - - @Override - public HttpResponse get() - throws InterruptedException, ExecutionException { - checkCancelled(); - latch.await(); - return this.getOrThrow(); - } - - @Override - public HttpResponse get(long timeout, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { - checkCancelled(); - if (!latch.await(timeout, unit)) { - throw new TimeoutException(String.format( - "Timed out waiting for HttpResponse after %d %s.", - timeout, unit.toString().toLowerCase())); - } - return this.getOrThrow(); - } - - @Override - public boolean isCancelled() { - return isCancelled; - } - - @Override - public boolean isDone() { - return !isCancelled && latch.getCount() == 0; - } - - private void checkCancelled() { - if (isCancelled()) { - throw new CancellationException(); - } - } - - private HttpResponse getOrThrow() throws ExecutionException { - try { - return runner.getResponse(); - } catch (Exception e) { - throw new ExecutionException(e); - } - } - }; - } - - /** - * Runs the actual handling of the HTTP request. - */ - private final class RequestRunner implements Runnable, - ContainerResponseWriter { - - private final CountDownLatch latch; - private final HttpRequest nettyReq; - private final InputStream reqIn; - private final OutputStream respOut; - - private Exception exception; - private HttpResponse nettyResp; - - /** - * Creates a new RequestRunner. 
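Back at dispatch(): the hand-rolled Future above is coordinated by a single-count latch that RequestRunner trips once the response status and headers are known. Not the patch's code, but the same hand-off can be sketched with CompletableFuture, where the worker completes the promise instead of counting down:

```java
import io.netty.handler.codec.http.HttpResponse;
import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;

final class DispatchSketch {
  static CompletableFuture<HttpResponse> dispatch(
      Consumer<CompletableFuture<HttpResponse>> runner) {
    CompletableFuture<HttpResponse> promise = new CompletableFuture<>();
    Thread worker = new Thread(() -> {
      try {
        runner.accept(promise); // runner completes the promise as soon as
                                // the status and headers are ready
      } catch (RuntimeException e) {
        promise.completeExceptionally(e);
      }
    });
    worker.setDaemon(true);
    worker.start();
    return promise;
  }
}
```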
- * - * @param nettyReq HTTP request - * @param reqIn input stream for reading request body - * @param respOut output stream for writing response body - * @param latch for coordinating asynchronous return of HTTP response - */ - RequestRunner(HttpRequest nettyReq, InputStream reqIn, - OutputStream respOut, CountDownLatch latch) { - this.latch = latch; - this.nettyReq = nettyReq; - this.reqIn = reqIn; - this.respOut = respOut; - } - - @Override - public void run() { - LOG.trace("begin RequestRunner, nettyReq = {}", this.nettyReq); - StorageHandlerBuilder.setStorageHandler( - ObjectStoreJerseyContainer.this.storageHandler); - try { - ContainerRequest jerseyReq = nettyRequestToJerseyRequest( - ObjectStoreJerseyContainer.this.webapp, this.nettyReq, this.reqIn); - ObjectStoreJerseyContainer.this.webapp.handleRequest(jerseyReq, this); - } catch (Exception e) { - LOG.error("Error running Jersey Request Runner", e); - this.exception = e; - this.latch.countDown(); - } finally { - IOUtils.cleanupWithLogger(null, this.reqIn, this.respOut); - StorageHandlerBuilder.removeStorageHandler(); - } - LOG.trace("end RequestRunner, nettyReq = {}", this.nettyReq); - } - - /** - * This is a callback triggered by Jersey as soon as dispatch has completed - * to the point of knowing what kind of response to return. We save the - * response and trigger the latch to unblock callers waiting on the - * asynchronous return of the response. Our response always sets a - * Content-Length header. (We do not support Transfer-Encoding: chunked.) - * We also return the output stream for Jersey to use for writing the - * response body. - * - * @param contentLength length of response - * @param jerseyResp HTTP response returned by Jersey - * @return OutputStream for Jersey to use for writing the response body - */ - @Override - public OutputStream writeStatusAndHeaders(long contentLength, - ContainerResponse jerseyResp) { - LOG.trace( - "begin writeStatusAndHeaders, contentLength = {}, jerseyResp = {}.", - contentLength, jerseyResp); - this.nettyResp = jerseyResponseToNettyResponse(jerseyResp); - this.nettyResp.headers().set(CONTENT_LENGTH, Math.max(0, contentLength)); - this.nettyResp.headers().set(CONNECTION, - HttpHeaders.isKeepAlive(this.nettyReq) ? KEEP_ALIVE : CLOSE); - this.latch.countDown(); - LOG.trace( - "end writeStatusAndHeaders, contentLength = {}, jerseyResp = {}.", - contentLength, jerseyResp); - return this.respOut; - } - - /** - * This is a callback triggered by Jersey after it has completed writing the - * response body to the stream. We must close the stream here to unblock - * the Netty thread consuming the last chunk of the response from the input - * end of the piped stream. - * - * @throws IOException if there is an I/O error - */ - @Override - public void finish() throws IOException { - IOUtils.cleanupWithLogger(null, this.respOut); - } - - /** - * Gets the HTTP response calculated by the Jersey application, or throws an - * exception if an error occurred during processing. It only makes sense to - * call this method after waiting on the latch to trigger. - * - * @return HTTP response - * @throws Exception if there was an error executing the request - */ - public HttpResponse getResponse() throws Exception { - if (this.exception != null) { - throw this.exception; - } - return this.nettyResp; - } - } - - /** - * Converts a Jersey HTTP response object to a Netty HTTP response object. 
- * - * @param jerseyResp Jersey HTTP response - * @return Netty HTTP response - */ - private static HttpResponse jerseyResponseToNettyResponse( - ContainerResponse jerseyResp) { - HttpResponse nettyResp = new DefaultHttpResponse(HTTP_1_1, - HttpResponseStatus.valueOf(jerseyResp.getStatus())); - for (Map.Entry> header : - jerseyResp.getHttpHeaders().entrySet()) { - if (!header.getKey().equalsIgnoreCase(CONTENT_LENGTH.toString()) && - !header.getKey().equalsIgnoreCase(TRANSFER_ENCODING.toString())) { - nettyResp.headers().set(header.getKey(), header.getValue()); - } - } - return nettyResp; - } - - /** - * Converts a Netty HTTP request object to a Jersey HTTP request object. - * - * @param webapp web application - * @param nettyReq Netty HTTP request - * @param reqIn input stream for reading request body - * @return Jersey HTTP request - * @throws URISyntaxException if there is an error handling the request URI - */ - private static ContainerRequest nettyRequestToJerseyRequest( - WebApplication webapp, HttpRequest nettyReq, InputStream reqIn) - throws URISyntaxException { - HttpHeaders nettyHeaders = nettyReq.headers(); - InBoundHeaders jerseyHeaders = new InBoundHeaders(); - for (String name : nettyHeaders.names()) { - jerseyHeaders.put(name, nettyHeaders.getAll(name)); - } - String host = nettyHeaders.get(HOST); - String scheme = host.startsWith("https") ? "https://" : "http://"; - String baseUri = scheme + host + "/"; - String reqUri = scheme + host + nettyReq.getUri(); - LOG.trace("baseUri = {}, reqUri = {}", baseUri, reqUri); - return new ContainerRequest(webapp, nettyReq.getMethod().name(), - new URI(baseUri), new URI(reqUri), jerseyHeaders, reqIn); - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreJerseyContainerProvider.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreJerseyContainerProvider.java deleted file mode 100644 index e9439694e87..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreJerseyContainerProvider.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.netty; - -import com.sun.jersey.api.container.ContainerException; -import com.sun.jersey.api.core.ResourceConfig; -import com.sun.jersey.spi.container.ContainerProvider; -import com.sun.jersey.spi.container.WebApplication; - -/** - * This is a Jersey {@link ContainerProvider} capable of boostrapping the - * Object Store web application into a custom container. It must be registered - * using the Java service loader mechanism by listing it in - * META-INF/services/com.sun.jersey.spi.container.ContainerProvider . 
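To make the registration just described concrete: the provider jar carries a single-line file at META-INF/services/com.sun.jersey.spi.container.ContainerProvider naming the implementation class, and Jersey discovers it through the standard ServiceLoader mechanism. A lookup sketch (illustration only, not part of the patch):

```java
import com.sun.jersey.spi.container.ContainerProvider;
import java.util.ServiceLoader;

final class ProviderLookupSketch {
  static void printRegisteredProviders() {
    // Reads every META-INF/services/...ContainerProvider entry on the
    // classpath; this patch's entry names ObjectStoreJerseyContainerProvider.
    for (ContainerProvider provider
        : ServiceLoader.load(ContainerProvider.class)) {
      System.out.println(provider.getClass().getName());
    }
  }
}
```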
- */ -public final class ObjectStoreJerseyContainerProvider - implements ContainerProvider<ObjectStoreJerseyContainer> { - - @Override - public ObjectStoreJerseyContainer createContainer( - Class<ObjectStoreJerseyContainer> type, ResourceConfig conf, - WebApplication webapp) throws ContainerException { - return new ObjectStoreJerseyContainer(webapp); - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreRestHttpServer.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreRestHttpServer.java deleted file mode 100644 index 8ab932572aa..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreRestHttpServer.java +++ /dev/null @@ -1,215 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.netty; - -import javax.servlet.FilterConfig; -import javax.servlet.ServletContext; -import java.io.Closeable; -import java.io.IOException; -import java.net.BindException; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import java.net.SocketException; -import java.nio.channels.ServerSocketChannel; -import java.util.Enumeration; -import java.util.Map; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.security.http.RestCsrfPreventionFilter; - -import io.netty.bootstrap.ChannelFactory; -import io.netty.bootstrap.ServerBootstrap; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelInitializer; -import io.netty.channel.ChannelOption; -import io.netty.channel.ChannelPipeline; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.socket.SocketChannel; -import io.netty.channel.socket.nio.NioServerSocketChannel; -import io.netty.handler.codec.http.HttpRequestDecoder; -import io.netty.handler.codec.http.HttpResponseEncoder; -import io.netty.handler.stream.ChunkedWriteHandler; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .HDDS_REST_HTTP_ADDRESS_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .HDDS_REST_HTTP_ADDRESS_KEY; - -/** - * Netty-based web server for the HDDS REST API. - *
- * Based on the Datanode http server. - */ -public class ObjectStoreRestHttpServer implements Closeable { - private final EventLoopGroup bossGroup; - private final EventLoopGroup workerGroup; - private final ServerSocketChannel externalHttpChannel; - private final ServerBootstrap httpServer; - private final Configuration conf; - private final Configuration confForCreate; - private InetSocketAddress httpAddress; - static final Log LOG = LogFactory.getLog(ObjectStoreRestHttpServer.class); - private final ObjectStoreHandler objectStoreHandler; - - public ObjectStoreRestHttpServer(final Configuration conf, - final ServerSocketChannel externalHttpChannel, - ObjectStoreHandler objectStoreHandler) throws IOException { - this.conf = conf; - - this.confForCreate = new Configuration(conf); - this.objectStoreHandler = objectStoreHandler; - confForCreate.set(FsPermission.UMASK_LABEL, "000"); - - this.bossGroup = new NioEventLoopGroup(); - this.workerGroup = new NioEventLoopGroup(); - this.externalHttpChannel = externalHttpChannel; - - this.httpServer = new ServerBootstrap(); - this.httpServer.group(bossGroup, workerGroup); - this.httpServer.childHandler(new ChannelInitializer<SocketChannel>() { - @Override - protected void initChannel(SocketChannel ch) throws Exception { - ChannelPipeline p = ch.pipeline(); - p.addLast(new HttpRequestDecoder(), new HttpResponseEncoder()); - // Later we have to support cross-site request forgery (CSRF) Filter - p.addLast(new ChunkedWriteHandler(), new ObjectStoreURLDispatcher( - objectStoreHandler.getObjectStoreJerseyContainer())); - } - }); - - this.httpServer.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, - conf.getInt(ScmConfigKeys.HDDS_REST_NETTY_HIGH_WATERMARK, - ScmConfigKeys.HDDS_REST_NETTY_HIGH_WATERMARK_DEFAULT)); - this.httpServer.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, - conf.getInt(ScmConfigKeys.HDDS_REST_NETTY_LOW_WATERMARK, - ScmConfigKeys.HDDS_REST_NETTY_LOW_WATERMARK_DEFAULT)); - - if (externalHttpChannel == null) { - httpServer.channel(NioServerSocketChannel.class); - } else { - httpServer.channelFactory( - (ChannelFactory<NioServerSocketChannel>) () -> new - NioServerSocketChannel( - externalHttpChannel) { - // The channel has been bound externally via JSVC, - // thus bind() becomes a no-op. - @Override - protected void doBind(SocketAddress localAddress) throws Exception { - } - }); - } - - } - - public InetSocketAddress getHttpAddress() { - return httpAddress; - } - - public void start() throws IOException { - if (httpServer != null) { - - InetSocketAddress infoAddr = NetUtils.createSocketAddr( - conf.getTrimmed(HDDS_REST_HTTP_ADDRESS_KEY, - HDDS_REST_HTTP_ADDRESS_DEFAULT)); - - ChannelFuture f = httpServer.bind(infoAddr); - try { - f.syncUninterruptibly(); - } catch (Throwable e) { - if (e instanceof BindException) { - throw NetUtils.wrapException(null, 0, infoAddr.getHostName(), - infoAddr.getPort(), (SocketException) e); - } else { - throw e; - } - } - httpAddress = (InetSocketAddress) f.channel().localAddress(); - LOG.info("Listening HDDS REST traffic on " + httpAddress); - } - - } - - @Override - public void close() throws IOException { - bossGroup.shutdownGracefully(); - workerGroup.shutdownGracefully(); - if (externalHttpChannel != null) { - externalHttpChannel.close(); - } - } - - /** - * A minimal {@link FilterConfig} implementation backed by a {@link Map}. - */ - private static final class MapBasedFilterConfig implements FilterConfig { - - private final String filterName; - private final Map<String, String> parameters; -
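A map-backed FilterConfig like the one above is a common trick for initializing a servlet Filter outside a servlet container. A hedged sketch of the same pattern (it assumes the javax.servlet API on the classpath and Java 9+ for Map.of; the anonymous Filter is a stand-in, not the CSRF filter this class targets):

import javax.servlet.*;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Map;

public final class FilterInitSketch {
  /** Minimal FilterConfig over a Map, mirroring the pattern above. */
  static final class MapFilterConfig implements FilterConfig {
    private final String name;
    private final Map<String, String> params;
    MapFilterConfig(String name, Map<String, String> params) {
      this.name = name;
      this.params = params;
    }
    @Override public String getFilterName() { return name; }
    @Override public String getInitParameter(String key) { return params.get(key); }
    @Override public Enumeration<String> getInitParameterNames() {
      return Collections.enumeration(params.keySet());
    }
    @Override public ServletContext getServletContext() { return null; }
  }

  public static void main(String[] args) throws ServletException {
    Filter filter = new Filter() {  // stand-in for a real filter
      @Override public void init(FilterConfig cfg) {
        System.out.println("custom-header = " + cfg.getInitParameter("custom-header"));
      }
      @Override public void doFilter(ServletRequest rq, ServletResponse rs, FilterChain ch) { }
      @Override public void destroy() { }
    };
    // The map plays the role of web.xml init-params.
    filter.init(new MapFilterConfig("csrf", Map.of("custom-header", "X-XSRF-HEADER")));
  }
}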
- /** - * Creates a new MapBasedFilterConfig. - * - * @param filterName filter name - * @param parameters mapping of filter initialization parameters - */ - MapBasedFilterConfig(String filterName, - Map<String, String> parameters) { - this.filterName = filterName; - this.parameters = parameters; - } - - @Override - public String getFilterName() { - return this.filterName; - } - - @Override - public String getInitParameter(String name) { - return this.parameters.get(name); - } - - @Override - public Enumeration<String> getInitParameterNames() { - throw this.notImplemented(); - } - - @Override - public ServletContext getServletContext() { - throw this.notImplemented(); - } - - /** - * Creates an exception indicating that an interface method is not - * implemented. These should never be seen in practice, because it is only - * used for methods that are not called by {@link RestCsrfPreventionFilter}. - * - * @return exception indicating method not implemented - */ - private UnsupportedOperationException notImplemented() { - return new UnsupportedOperationException( - this.getClass().getSimpleName() + " does not implement this method."); - } - } -} - diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreURLDispatcher.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreURLDispatcher.java deleted file mode 100644 index 9020c141ccf..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreURLDispatcher.java +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.netty; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; - -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPipeline; -import io.netty.channel.SimpleChannelInboundHandler; -import io.netty.handler.codec.http.HttpRequest; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Url dispatcher to bridge netty request to jersey container. - */ -@InterfaceAudience.Private -public class ObjectStoreURLDispatcher - extends SimpleChannelInboundHandler<HttpRequest> { - protected static final Logger LOG = - LoggerFactory.getLogger(ObjectStoreURLDispatcher.class); - private final ObjectStoreJerseyContainer objectStoreJerseyContainer; - - ObjectStoreURLDispatcher( - ObjectStoreJerseyContainer objectStoreJerseyContainer) - throws IOException { - - this.objectStoreJerseyContainer = objectStoreJerseyContainer; - } - - @Override - protected void channelRead0(ChannelHandlerContext ctx, HttpRequest req) - throws Exception { - ChannelPipeline p = ctx.pipeline(); - RequestDispatchObjectStoreChannelHandler h = - new RequestDispatchObjectStoreChannelHandler( - this.objectStoreJerseyContainer); - p.replace(this, - RequestDispatchObjectStoreChannelHandler.class.getSimpleName(), h); - h.channelRead0(ctx, req); - - } - -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/RequestContentObjectStoreChannelHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/RequestContentObjectStoreChannelHandler.java deleted file mode 100644 index 0a2f22d6b13..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/RequestContentObjectStoreChannelHandler.java +++ /dev/null @@ -1,117 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package org.apache.hadoop.ozone.web.netty; - -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.http.HttpContent; -import io.netty.handler.codec.http.HttpHeaders; -import io.netty.handler.codec.http.HttpRequest; -import io.netty.handler.codec.http.HttpResponse; -import io.netty.handler.codec.http.LastHttpContent; -import io.netty.handler.stream.ChunkedStream; -import org.apache.hadoop.io.IOUtils; - -import java.io.InputStream; -import java.io.OutputStream; -import java.util.concurrent.Future; - -/** - * Object Store Netty channel pipeline handler that handles inbound - * {@link HttpContent} fragments for the request body by sending the bytes into - * the pipe so that the application dispatch thread can read it. - * After receiving the {@link LastHttpContent}, this handler also flushes the - * response. - */ -public final class RequestContentObjectStoreChannelHandler - extends ObjectStoreChannelHandler<HttpContent> { - - private final HttpRequest nettyReq; - private final Future<HttpResponse> nettyResp; - private final OutputStream reqOut; - private final InputStream respIn; - private ObjectStoreJerseyContainer jerseyContainer; - - /** - * Creates a new RequestContentObjectStoreChannelHandler. - * - * @param nettyReq HTTP request - * @param nettyResp asynchronous HTTP response - * @param reqOut output stream for writing request body - * @param respIn input stream for reading response body - * @param jerseyContainer jerseyContainer to handle the request - */ - public RequestContentObjectStoreChannelHandler(HttpRequest nettyReq, - Future<HttpResponse> nettyResp, OutputStream reqOut, InputStream respIn, - ObjectStoreJerseyContainer jerseyContainer) { - this.nettyReq = nettyReq; - this.nettyResp = nettyResp; - this.reqOut = reqOut; - this.respIn = respIn; - this.jerseyContainer = jerseyContainer; - } - - @Override - public void channelReadComplete(ChannelHandlerContext ctx) throws Exception { - ctx.flush(); - } - - @Override - public void channelRead0(ChannelHandlerContext ctx, HttpContent content) - throws Exception { - LOG.trace( - "begin RequestContentObjectStoreChannelHandler channelRead0, " + - "ctx = {}, content = {}", ctx, content); - content.content().readBytes(this.reqOut, content.content().readableBytes()); - if (content instanceof LastHttpContent) { - IOUtils.cleanupWithLogger(null, this.reqOut); - ctx.write(this.nettyResp.get()); - ChannelFuture respFuture = ctx.writeAndFlush(new ChunkedStream( - this.respIn)); - respFuture.addListener(new CloseableCleanupListener(this.respIn)); - if (!HttpHeaders.isKeepAlive(this.nettyReq)) { - respFuture.addListener(ChannelFutureListener.CLOSE); - } else { - respFuture.addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - // Notify client this is the last content for current request. - ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT); - // Reset the pipeline handler for the next request to reuse the - // same connection.
- RequestDispatchObjectStoreChannelHandler h = - new RequestDispatchObjectStoreChannelHandler(jerseyContainer); - ctx.pipeline().replace(ctx.pipeline().last(), - RequestDispatchObjectStoreChannelHandler.class.getSimpleName(), - h); - } - }); - } - } - LOG.trace( - "end RequestContentObjectStoreChannelHandler channelRead0, " + - "ctx = {}, content = {}", ctx, content); - } - - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { - super.exceptionCaught(ctx, cause); - IOUtils.cleanupWithLogger(null, this.reqOut, this.respIn); - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/RequestDispatchObjectStoreChannelHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/RequestDispatchObjectStoreChannelHandler.java deleted file mode 100644 index add827a6740..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/RequestDispatchObjectStoreChannelHandler.java +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.netty; - -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.http.DefaultFullHttpResponse; -import io.netty.handler.codec.http.HttpHeaders; -import io.netty.handler.codec.http.HttpRequest; -import io.netty.handler.codec.http.HttpResponse; -import org.apache.hadoop.io.IOUtils; - -import java.io.PipedInputStream; -import java.io.PipedOutputStream; -import java.util.concurrent.Future; - -import static io.netty.handler.codec.http.HttpResponseStatus.BAD_REQUEST; -import static io.netty.handler.codec.http.HttpResponseStatus.CONTINUE; -import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; - -/** - * Object Store Netty channel pipeline handler that handles an inbound - * {@link HttpRequest} by dispatching it to the Object Store Jersey container. - * The handler establishes 2 sets of connected piped streams: one for inbound - * request handling and another for outbound response handling. The relevant - * ends of these pipes are handed off to the Jersey application dispatch and the - * next channel handler, which is responsible for streaming in the inbound - * request body and flushing out the response body. - */ -public final class RequestDispatchObjectStoreChannelHandler - extends ObjectStoreChannelHandler<HttpRequest> { - - private final ObjectStoreJerseyContainer jerseyContainer; - - private PipedInputStream reqIn; - private PipedOutputStream reqOut; - private PipedInputStream respIn; - private PipedOutputStream respOut; -
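The keep-alive reset above and the dispatch hand-off below both lean on Netty's live pipeline surgery: a handler replaces itself once it has seen enough of the request. A minimal, runnable illustration using EmbeddedChannel (handler names and messages invented for the sketch):

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.embedded.EmbeddedChannel;

public final class PipelineSwapSketch {
  /** First message flips the pipeline into "body mode". */
  static final class HeaderHandler extends SimpleChannelInboundHandler<String> {
    @Override protected void channelRead0(ChannelHandlerContext ctx, String msg) {
      System.out.println("header: " + msg);
      // Replace ourselves so subsequent reads hit the body handler,
      // the same hand-off style the dispatcher uses.
      ctx.pipeline().replace(this, "body", new BodyHandler());
    }
  }

  static final class BodyHandler extends SimpleChannelInboundHandler<String> {
    @Override protected void channelRead0(ChannelHandlerContext ctx, String msg) {
      System.out.println("body chunk: " + msg);
    }
  }

  public static void main(String[] args) {
    EmbeddedChannel ch = new EmbeddedChannel(new HeaderHandler());
    ch.writeInbound("GET /volume/bucket/key");  // handled by HeaderHandler
    ch.writeInbound("chunk-1");                 // handled by BodyHandler
    ch.writeInbound("chunk-2");
    ch.finish();
  }
}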
- /** - * Creates a new RequestDispatchObjectStoreChannelHandler. - * - * @param jerseyContainer Object Store application Jersey container for - * request dispatch - */ - public RequestDispatchObjectStoreChannelHandler( - ObjectStoreJerseyContainer jerseyContainer) { - this.jerseyContainer = jerseyContainer; - } - - @Override - public void channelRead0(ChannelHandlerContext ctx, HttpRequest nettyReq) - throws Exception { - LOG.trace("begin RequestDispatchObjectStoreChannelHandler channelRead0, " + - "ctx = {}, nettyReq = {}", ctx, nettyReq); - if (!nettyReq.getDecoderResult().isSuccess()) { - sendErrorResponse(ctx, BAD_REQUEST); - return; - } - - this.reqIn = new PipedInputStream(); - this.reqOut = new PipedOutputStream(reqIn); - this.respIn = new PipedInputStream(); - this.respOut = new PipedOutputStream(respIn); - - if (HttpHeaders.is100ContinueExpected(nettyReq)) { - LOG.trace("Sending continue response."); - ctx.writeAndFlush(new DefaultFullHttpResponse(HTTP_1_1, CONTINUE)); - } - - Future<HttpResponse> nettyResp = this.jerseyContainer.dispatch(nettyReq, - reqIn, respOut); - - ctx.pipeline().replace(this, - RequestContentObjectStoreChannelHandler.class.getSimpleName(), - new RequestContentObjectStoreChannelHandler(nettyReq, nettyResp, - reqOut, respIn, jerseyContainer)); - - LOG.trace("end RequestDispatchObjectStoreChannelHandler channelRead0, " + - "ctx = {}, nettyReq = {}", ctx, nettyReq); - } - - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { - super.exceptionCaught(ctx, cause); - IOUtils.cleanupWithLogger(null, this.reqIn, this.reqOut, this.respIn, - this.respOut); - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/package-info.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/package-info.java deleted file mode 100644 index f4aa675b678..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/package-info.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Netty-based HTTP server implementation for Ozone. - */ -@InterfaceAudience.Private -package org.apache.hadoop.ozone.web.netty; - -import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/package-info.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/package-info.java deleted file mode 100644 index d93edf8d77e..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web; - -/** - * Ozone JAX-RS rest interface related classes and handlers. - */ \ No newline at end of file diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java deleted file mode 100644 index eb694c412f7..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java +++ /dev/null @@ -1,620 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.web.storage; - -import com.google.common.base.Strings; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.scm.ByteStringHelper; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ChecksumType; -import org.apache.hadoop.ozone.client.io.KeyInputStream; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.LengthInputStream; -import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.OzoneConsts.Versioning; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; -import org.apache.hadoop.ozone.web.request.OzoneQuota; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.web.handlers.BucketArgs; -import org.apache.hadoop.ozone.web.handlers.KeyArgs; -import org.apache.hadoop.ozone.web.handlers.ListArgs; -import org.apache.hadoop.ozone.web.handlers.VolumeArgs; -import org.apache.hadoop.ozone.web.handlers.UserArgs; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.response.*; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; - -/** - * A {@link StorageHandler} implementation that distributes object storage - * across the nodes of an HDFS cluster. 
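The handler whose deletion starts here is essentially a translation layer: web-tier argument objects in, OzoneManager/SCM protocol calls out. Its shape, reduced to a skeleton with simplified types (none of these interfaces or names are from the patch; quota defaulting mimics the MAX_QUOTA fallback used below):

public final class StorageFacadeSketch {
  /** Hypothetical narrow view of the OM client used by the handler. */
  interface VolumeService {
    void createVolume(String volume, String owner, long quotaBytes);
    void deleteVolume(String volume);
  }

  /** Facade mirroring the delegate-and-translate shape of the handler. */
  static final class Facade {
    private final VolumeService om;
    Facade(VolumeService om) { this.om = om; }

    void createVolume(String volume, String owner, Long quota) {
      // Default a missing quota, as the handler defaults to its maximum.
      long q = (quota == null) ? Long.MAX_VALUE : quota;
      om.createVolume(volume, owner, q);
    }
  }

  public static void main(String[] args) {
    Facade facade = new Facade(new VolumeService() {
      @Override public void createVolume(String v, String o, long q) {
        System.out.printf("createVolume(%s, %s, %d)%n", v, o, q);
      }
      @Override public void deleteVolume(String v) { }
    });
    facade.createVolume("vol1", "hadoop", null);
  }
}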
- */ -public final class DistributedStorageHandler implements StorageHandler { - private static final Logger LOG = - LoggerFactory.getLogger(DistributedStorageHandler.class); - - private final StorageContainerLocationProtocol - storageContainerLocationClient; - private final OzoneManagerProtocol - ozoneManagerClient; - private final XceiverClientManager xceiverClientManager; - private final ACLType userRights; - private final ACLType groupRights; - private int chunkSize; - private final long streamBufferFlushSize; - private final long streamBufferMaxSize; - private final long watchTimeout; - private final long blockSize; - private final ChecksumType checksumType; - private final int bytesPerChecksum; - private final boolean verifyChecksum; - private final int maxRetryCount; - private final long retryInterval; - - /** - * Creates a new DistributedStorageHandler. - * - * @param conf configuration - * @param storageContainerLocation StorageContainerLocationProtocol proxy - * @param ozoneManagerClient OzoneManager proxy - */ - public DistributedStorageHandler(OzoneConfiguration conf, - StorageContainerLocationProtocol storageContainerLocation, - OzoneManagerProtocol ozoneManagerClient) { - this.ozoneManagerClient = ozoneManagerClient; - this.storageContainerLocationClient = storageContainerLocation; - this.xceiverClientManager = new XceiverClientManager(conf); - - chunkSize = (int)conf.getStorageSize(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, - ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES); - // Get default acl rights for user and group. - OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); - this.userRights = aclConfig.getUserDefaultRights(); - this.groupRights = aclConfig.getGroupDefaultRights(); - if(chunkSize > OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) { - LOG.warn("The chunk size ({}) is not allowed to be more than" - + " the maximum size ({})," - + " resetting to the maximum size.", - chunkSize, OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE); - chunkSize = OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE; - } - streamBufferFlushSize = (long) conf - .getStorageSize(OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE, - OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE_DEFAULT, - StorageUnit.BYTES); - streamBufferMaxSize = (long) conf - .getStorageSize(OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE, - OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE_DEFAULT, - StorageUnit.BYTES); - blockSize = (long) conf.getStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, - OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); - watchTimeout = - conf.getTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, - OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - - int configuredChecksumSize = (int) conf.getStorageSize( - OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM, - OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT, - StorageUnit.BYTES); - - if(configuredChecksumSize < - OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE) { - LOG.warn("The checksum size ({}) is not allowed to be less than the " + - "minimum size ({}), resetting to the minimum size.", - configuredChecksumSize, - OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE); - bytesPerChecksum = - OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE; - } else { - bytesPerChecksum = configuredChecksumSize; - } - String checksumTypeStr = conf.get( - OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE, - OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT); - 
this.checksumType = ChecksumType.valueOf(checksumTypeStr); - this.verifyChecksum = - conf.getBoolean(OzoneConfigKeys.OZONE_CLIENT_VERIFY_CHECKSUM, - OzoneConfigKeys.OZONE_CLIENT_VERIFY_CHECKSUM_DEFAULT); - this.maxRetryCount = - conf.getInt(OzoneConfigKeys.OZONE_CLIENT_MAX_RETRIES, OzoneConfigKeys. - OZONE_CLIENT_MAX_RETRIES_DEFAULT); - this.retryInterval = OzoneUtils.getTimeDurationInMS(conf, - OzoneConfigKeys.OZONE_CLIENT_RETRY_INTERVAL, - OzoneConfigKeys.OZONE_CLIENT_RETRY_INTERVAL_DEFAULT); - boolean isUnsafeByteOperationsEnabled = conf.getBoolean( - OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED, - OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED_DEFAULT); - ByteStringHelper.init(isUnsafeByteOperationsEnabled); - - } - - @Override - public void createVolume(VolumeArgs args) throws IOException, OzoneException { - long quota = args.getQuota() == null ? - OzoneConsts.MAX_QUOTA_IN_BYTES : args.getQuota().sizeInBytes(); - OzoneAcl userAcl = new OzoneAcl(ACLIdentityType.USER, args.getUserName(), - userRights, ACCESS); - OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder(); - builder.setAdminName(args.getAdminName()) - .setOwnerName(args.getUserName()) - .setVolume(args.getVolumeName()) - .setQuotaInBytes(quota) - .addOzoneAcls(OzoneAcl.toProtobuf(userAcl)); - if (args.getGroups() != null) { - for (String group : args.getGroups()) { - OzoneAcl groupAcl = - new OzoneAcl(ACLIdentityType.GROUP, group, groupRights, ACCESS); - builder.addOzoneAcls(OzoneAcl.toProtobuf(groupAcl)); - } - } - ozoneManagerClient.createVolume(builder.build()); - } - - @Override - public void setVolumeOwner(VolumeArgs args) throws - IOException, OzoneException { - ozoneManagerClient.setOwner(args.getVolumeName(), args.getUserName()); - } - - @Override - public void setVolumeQuota(VolumeArgs args, boolean remove) - throws IOException, OzoneException { - long quota = remove ? 
OzoneConsts.MAX_QUOTA_IN_BYTES : - args.getQuota().sizeInBytes(); - ozoneManagerClient.setQuota(args.getVolumeName(), quota); - } - - @Override - public boolean checkVolumeAccess(String volume, OzoneAcl acl) - throws IOException, OzoneException { - return ozoneManagerClient - .checkVolumeAccess(volume, OzoneAcl.toProtobuf(acl)); - } - - @Override - public ListVolumes listVolumes(ListArgs args) - throws IOException, OzoneException { - int maxNumOfKeys = args.getMaxKeys(); - if (maxNumOfKeys <= 0 || - maxNumOfKeys > OzoneConsts.MAX_LISTVOLUMES_SIZE) { - throw new IllegalArgumentException( - String.format("Illegal max number of keys specified," - + " the value must be in range (0, %d], actual : %d.", - OzoneConsts.MAX_LISTVOLUMES_SIZE, maxNumOfKeys)); - } - - List<OmVolumeArgs> listResult; - if (args.isRootScan()) { - listResult = ozoneManagerClient.listAllVolumes(args.getPrefix(), - args.getPrevKey(), args.getMaxKeys()); - } else { - UserArgs userArgs = args.getArgs(); - if (userArgs == null || userArgs.getUserName() == null) { - throw new IllegalArgumentException("Illegal argument," - + " missing user argument."); - } - listResult = ozoneManagerClient.listVolumeByUser( - args.getArgs().getUserName(), args.getPrefix(), args.getPrevKey(), - args.getMaxKeys()); - } - - // TODO Add missing fields createdBy, bucketCount and bytesUsed - ListVolumes result = new ListVolumes(); - for (OmVolumeArgs volumeArgs : listResult) { - VolumeInfo info = new VolumeInfo(); - OzoneManagerProtocolProtos.VolumeInfo - infoProto = volumeArgs.getProtobuf(); - info.setOwner(new VolumeOwner(infoProto.getOwnerName())); - info.setQuota(OzoneQuota.getOzoneQuota(infoProto.getQuotaInBytes())); - info.setVolumeName(infoProto.getVolume()); - info.setCreatedOn( - HddsClientUtils.formatDateTime(infoProto.getCreationTime())); - result.addVolume(info); - } - - return result; - } - - @Override - public void deleteVolume(VolumeArgs args) - throws IOException, OzoneException { - ozoneManagerClient.deleteVolume(args.getVolumeName()); - } - - @Override - public VolumeInfo getVolumeInfo(VolumeArgs args) - throws IOException, OzoneException { - OmVolumeArgs volumeArgs = - ozoneManagerClient.getVolumeInfo(args.getVolumeName()); - //TODO: add support for createdOn and other fields in getVolumeInfo - VolumeInfo volInfo = - new VolumeInfo(volumeArgs.getVolume(), null, - volumeArgs.getAdminName()); - volInfo.setOwner(new VolumeOwner(volumeArgs.getOwnerName())); - volInfo.setQuota(OzoneQuota.getOzoneQuota(volumeArgs.getQuotaInBytes())); - volInfo.setCreatedOn( - HddsClientUtils.formatDateTime(volumeArgs.getCreationTime())); - return volInfo; - } - - @Override - public void createBucket(final BucketArgs args) - throws IOException, OzoneException { - OmBucketInfo.Builder builder = OmBucketInfo.newBuilder(); - builder.setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()); - if(args.getStorageType() != null) { - builder.setStorageType(args.getStorageType()); - } - if(args.getVersioning() != null) { - builder.setIsVersionEnabled(getBucketVersioningProtobuf( - args.getVersioning())); - } - ozoneManagerClient.createBucket(builder.build()); - } -
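The (0, max] bound that listVolumes enforces above reappears verbatim in listBuckets and listKeys below; factored out, it is a three-line guard. A sketch with invented helper and constant names:

public final class ListArgsCheckSketch {
  /** Mirrors the (0, hardLimit] check used by the list operations. */
  static int checkMaxKeys(int requested, int hardLimit) {
    if (requested <= 0 || requested > hardLimit) {
      throw new IllegalArgumentException(String.format(
          "Illegal max number of keys specified,"
              + " the value must be in range (0, %d], actual : %d.",
          hardLimit, requested));
    }
    return requested;
  }

  public static void main(String[] args) {
    System.out.println(checkMaxKeys(100, 1024));  // accepted
    try {
      checkMaxKeys(0, 1024);                      // rejected
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());
    }
  }
}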
- /** - * Converts OzoneConsts.Versioning enum to boolean. - * - * @param version - * @return corresponding boolean value - */ - private boolean getBucketVersioningProtobuf( - Versioning version) { - if(version != null) { - switch(version) { - case ENABLED: - return true; - case NOT_DEFINED: - case DISABLED: - default: - return false; - } - } - return false; - } - - @Override - public void setBucketVersioning(BucketArgs args) - throws IOException, OzoneException { - OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); - builder.setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setIsVersionEnabled(getBucketVersioningProtobuf( - args.getVersioning())); - ozoneManagerClient.setBucketProperty(builder.build()); - } - - @Override - public void setBucketStorageClass(BucketArgs args) - throws IOException, OzoneException { - OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); - builder.setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setStorageType(args.getStorageType()); - ozoneManagerClient.setBucketProperty(builder.build()); - } - - @Override - public void deleteBucket(BucketArgs args) - throws IOException, OzoneException { - ozoneManagerClient.deleteBucket(args.getVolumeName(), - args.getBucketName()); - } - - @Override - public void checkBucketAccess(BucketArgs args) - throws IOException, OzoneException { - throw new UnsupportedOperationException( - "checkBucketAccess not implemented"); - } - - @Override - public ListBuckets listBuckets(ListArgs args) - throws IOException, OzoneException { - ListBuckets result = new ListBuckets(); - UserArgs userArgs = args.getArgs(); - if (userArgs instanceof VolumeArgs) { - VolumeArgs va = (VolumeArgs) userArgs; - if (Strings.isNullOrEmpty(va.getVolumeName())) { - throw new IllegalArgumentException("Illegal argument," - + " volume name cannot be null or empty."); - } - - int maxNumOfKeys = args.getMaxKeys(); - if (maxNumOfKeys <= 0 || - maxNumOfKeys > OzoneConsts.MAX_LISTBUCKETS_SIZE) { - throw new IllegalArgumentException( - String.format("Illegal max number of keys specified," - + " the value must be in range (0, %d], actual : %d.", - OzoneConsts.MAX_LISTBUCKETS_SIZE, maxNumOfKeys)); - } - - List<OmBucketInfo> buckets = - ozoneManagerClient.listBuckets(va.getVolumeName(), - args.getPrevKey(), args.getPrefix(), args.getMaxKeys()); - - // Convert the result for the web layer.
- for (OmBucketInfo bucketInfo : buckets) { - BucketInfo bk = new BucketInfo(); - bk.setVolumeName(bucketInfo.getVolumeName()); - bk.setBucketName(bucketInfo.getBucketName()); - bk.setStorageType(bucketInfo.getStorageType()); - bk.setAcls(bucketInfo.getAcls()); - bk.setCreatedOn( - HddsClientUtils.formatDateTime(bucketInfo.getCreationTime())); - result.addBucket(bk); - } - return result; - } else { - throw new IllegalArgumentException("Illegal argument provided," - + " expecting VolumeArgs type but met " - + userArgs.getClass().getSimpleName()); - } - } - - @Override - public BucketInfo getBucketInfo(BucketArgs args) - throws IOException { - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - OmBucketInfo omBucketInfo = ozoneManagerClient.getBucketInfo( - volumeName, bucketName); - BucketInfo bucketInfo = new BucketInfo(omBucketInfo.getVolumeName(), - omBucketInfo.getBucketName()); - if(omBucketInfo.getIsVersionEnabled()) { - bucketInfo.setVersioning(Versioning.ENABLED); - } else { - bucketInfo.setVersioning(Versioning.DISABLED); - } - bucketInfo.setStorageType(omBucketInfo.getStorageType()); - bucketInfo.setAcls(omBucketInfo.getAcls()); - bucketInfo.setCreatedOn( - HddsClientUtils.formatDateTime(omBucketInfo.getCreationTime())); - return bucketInfo; - } - - @Override - public OutputStream newKeyWriter(KeyArgs args) throws IOException, - OzoneException { - Objects.requireNonNull(args.getUserName(), - "Username should not be null"); - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .setDataSize(args.getSize()) - .setType(xceiverClientManager.getType()) - .setFactor(xceiverClientManager.getFactor()) - .setAcls(OzoneAclUtil.getAclList(args.getUserName(), - args.getGroups() != null ? Arrays.asList(args.getGroups()) : null, - ACLType.ALL, ACLType.ALL)) - .build(); - // contact OM to allocate a block for key. 
- OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs); - KeyOutputStream keyOutputStream = - new KeyOutputStream.Builder() - .setHandler(openKey) - .setXceiverClientManager(xceiverClientManager) - .setOmClient(ozoneManagerClient) - .setChunkSize(chunkSize) - .setRequestID(args.getRequestID()) - .setType(xceiverClientManager.getType()) - .setFactor(xceiverClientManager.getFactor()) - .setStreamBufferFlushSize(streamBufferFlushSize) - .setStreamBufferMaxSize(streamBufferMaxSize) - .setBlockSize(blockSize) - .setWatchTimeout(watchTimeout) - .setChecksumType(checksumType) - .setBytesPerChecksum(bytesPerChecksum) - .setMaxRetryCount(maxRetryCount) - .setRetryInterval(retryInterval) - .build(); - keyOutputStream.addPreallocateBlocks( - openKey.getKeyInfo().getLatestVersionLocations(), - openKey.getOpenVersion()); - return new OzoneOutputStream(keyOutputStream); - } - - @Override - public void commitKey(KeyArgs args, OutputStream stream) throws - IOException, OzoneException { - stream.close(); - } - - @Override - public LengthInputStream newKeyReader(KeyArgs args) throws IOException, - OzoneException { - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .setDataSize(args.getSize()) - .setRefreshPipeline(true) - .build(); - OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs); - return KeyInputStream.getFromOmKeyInfo( - keyInfo, xceiverClientManager, verifyChecksum); - } - - @Override - public void deleteKey(KeyArgs args) throws IOException, OzoneException { - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .build(); - ozoneManagerClient.deleteKey(keyArgs); - } - - @Override - public void renameKey(KeyArgs args, String toKeyName) - throws IOException, OzoneException { - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .build(); - ozoneManagerClient.renameKey(keyArgs, toKeyName); - } - - @Override - public KeyInfo getKeyInfo(KeyArgs args) throws IOException, OzoneException { - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .setRefreshPipeline(true) - .build(); - - OmKeyInfo omKeyInfo = ozoneManagerClient.lookupKey(keyArgs); - KeyInfo keyInfo = new KeyInfo(); - keyInfo.setVersion(0); - keyInfo.setKeyName(omKeyInfo.getKeyName()); - keyInfo.setSize(omKeyInfo.getDataSize()); - keyInfo.setCreatedOn( - HddsClientUtils.formatDateTime(omKeyInfo.getCreationTime())); - keyInfo.setModifiedOn( - HddsClientUtils.formatDateTime(omKeyInfo.getModificationTime())); - keyInfo.setType(ReplicationType.valueOf(omKeyInfo.getType().toString())); - return keyInfo; - } - - @Override - public KeyInfo getKeyInfoDetails(KeyArgs args) throws IOException { - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .setRefreshPipeline(true) - .build(); - OmKeyInfo omKeyInfo = ozoneManagerClient.lookupKey(keyArgs); - List<KeyLocation> keyLocations = new ArrayList<>(); - omKeyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly() - .forEach((a) -> keyLocations.add(new KeyLocation(a.getContainerID(), - a.getLocalID(), a.getLength(), a.getOffset()))); - KeyInfoDetails keyInfoDetails = new
KeyInfoDetails(); - keyInfoDetails.setVersion(0); - keyInfoDetails.setKeyName(omKeyInfo.getKeyName()); - keyInfoDetails.setSize(omKeyInfo.getDataSize()); - keyInfoDetails.setCreatedOn( - HddsClientUtils.formatDateTime(omKeyInfo.getCreationTime())); - keyInfoDetails.setModifiedOn( - HddsClientUtils.formatDateTime(omKeyInfo.getModificationTime())); - keyInfoDetails.setKeyLocations(keyLocations); - keyInfoDetails.setType(ReplicationType.valueOf(omKeyInfo.getType() - .toString())); - return keyInfoDetails; - } - - @Override - public ListKeys listKeys(ListArgs args) throws IOException, OzoneException { - ListKeys result = new ListKeys(); - UserArgs userArgs = args.getArgs(); - if (userArgs instanceof BucketArgs) { - BucketArgs bucketArgs = (BucketArgs) userArgs; - if (Strings.isNullOrEmpty(bucketArgs.getVolumeName())) { - throw new IllegalArgumentException("Illegal argument," - + " volume name cannot be null or empty."); - } - - if (Strings.isNullOrEmpty(bucketArgs.getBucketName())) { - throw new IllegalArgumentException("Illegal argument," - + " bucket name cannot be null or empty."); - } - - int maxNumOfKeys = args.getMaxKeys(); - if (maxNumOfKeys <= 0 || - maxNumOfKeys > OzoneConsts.MAX_LISTKEYS_SIZE) { - throw new IllegalArgumentException( - String.format("Illegal max number of keys specified," - + " the value must be in range (0, %d], actual : %d.", - OzoneConsts.MAX_LISTKEYS_SIZE, maxNumOfKeys)); - } - - List<OmKeyInfo> keys = - ozoneManagerClient.listKeys(bucketArgs.getVolumeName(), - bucketArgs.getBucketName(), - args.getPrevKey(), args.getPrefix(), args.getMaxKeys()); - - // Convert the result for the web layer. - for (OmKeyInfo info : keys) { - KeyInfo tempInfo = new KeyInfo(); - tempInfo.setVersion(0); - tempInfo.setKeyName(info.getKeyName()); - tempInfo.setSize(info.getDataSize()); - tempInfo.setCreatedOn( - HddsClientUtils.formatDateTime(info.getCreationTime())); - tempInfo.setModifiedOn( - HddsClientUtils.formatDateTime(info.getModificationTime())); - tempInfo.setType(ReplicationType.valueOf(info.getType().toString())); - - result.addKey(tempInfo); - } - return result; - } else { - throw new IllegalArgumentException("Illegal argument provided," - + " expecting BucketArgs type but met " - + userArgs.getClass().getSimpleName()); - } - } - - /** - * Closes DistributedStorageHandler. - */ - @Override - public void close() { - IOUtils.cleanupWithLogger(LOG, xceiverClientManager); - IOUtils.cleanupWithLogger(LOG, ozoneManagerClient); - IOUtils.cleanupWithLogger(LOG, storageContainerLocationClient); - } -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/package-info.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/package-info.java deleted file mode 100644 index f5499f5b33b..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/package-info.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Ozone storage handler implementation integrating REST interface front-end - * with container data pipeline back-end. - */ -@InterfaceAudience.Private -package org.apache.hadoop.ozone.web.storage; - -import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/userauth/Simple.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/userauth/Simple.java deleted file mode 100644 index 397c80f7da3..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/userauth/Simple.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.userauth; - - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.web.exceptions.ErrorTable; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.web.handlers.UserArgs; -import org.apache.hadoop.ozone.client.rest.headers.Header; -import org.apache.hadoop.ozone.web.interfaces.UserAuth; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.security.UserGroupInformation; - -import javax.ws.rs.core.HttpHeaders; -import java.io.IOException; -import java.util.List; - -/** - * Simple is a UserAuth class that is used in the insecure - * mode of ozone. This maps more or less to the simple user scheme in - * HDFS. - */ -@InterfaceAudience.Private -public class Simple implements UserAuth { - /** - * Returns the x-ozone-user or the user on behalf of. This is - * used in volume creation path. - * - * @param userArgs - UserArgs - * - * @throws OzoneException - */ - @Override - public String getOzoneUser(UserArgs userArgs) throws OzoneException { - assert userArgs != null : "userArgs cannot be null"; - - HttpHeaders headers = userArgs.getHeaders(); - List<String> users = headers.getRequestHeader(Header.OZONE_USER); - - if ((users == null) || (users.size() == 0)) { - return null; - } - if (users.size() > 1) { - throw ErrorTable.newError(ErrorTable.BAD_AUTHORIZATION, userArgs); - } - return users.get(0).toLowerCase().trim(); - } -
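The header handling in this class (a single value enforced, the scheme prefix stripped, the user name lower-cased) fits in a few lines; a sketch using an invented scheme constant rather than the patch's Header class, and Java 9+ List.of in the demo:

import java.util.List;

public final class SimpleAuthSketch {
  private static final String SCHEME = "OZONE ";  // illustrative prefix only

  /** Returns the user name, or null when absent; rejects ambiguous headers. */
  static String parseUser(List<String> authHeaders) {
    if (authHeaders == null || authHeaders.isEmpty()) {
      return null;                       // anonymous request
    }
    if (authHeaders.size() > 1) {
      throw new IllegalArgumentException("multiple Authorization headers");
    }
    String value = authHeaders.get(0).trim();
    if (!value.startsWith(SCHEME)) {
      throw new IllegalArgumentException("unsupported auth scheme");
    }
    // Strip the scheme, then normalize the user name.
    return value.substring(SCHEME.length()).toLowerCase().trim();
  }

  public static void main(String[] args) {
    System.out.println(parseUser(List.of("OZONE Alice")));  // -> alice
    System.out.println(parseUser(null));                    // -> null
  }
}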
- /** - * Returns the user name as a string from the URI and HTTP headers. - * - * @param userArgs - user args - * - * @throws OzoneException -- Allows the underlying system - * to throw, that error will get propagated to clients - */ - @Override - public String getUser(UserArgs userArgs) throws OzoneException { - assert userArgs != null : "userArgs cannot be null"; - - HttpHeaders headers = userArgs.getHeaders(); - List<String> users = headers.getRequestHeader(HttpHeaders.AUTHORIZATION); - if (users == null || users.size() > 1) { - throw ErrorTable.newError(ErrorTable.BAD_AUTHORIZATION, userArgs); - } - - if (users.size() == 0) { - return null; - } - - String user = users.get(0).trim(); - if (user.startsWith(Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME)) { - user = user.replace(Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME, ""); - return user.toLowerCase().trim(); - } else { - throw ErrorTable.newError(ErrorTable.BAD_AUTHORIZATION, userArgs); - } - } - - - /** - * Returns true if a user is an Admin - {root and hdfs are treated as admins}. - * - * @param userArgs - User Args - * - * @throws OzoneException -- Allows the underlying system - * to throw, that error will get propagated to clients - */ - @Override - public boolean isAdmin(UserArgs userArgs) throws OzoneException { - assert userArgs != null : "userArgs cannot be null"; - - String user; - String currentUser; - try { - user = getUser(userArgs); - currentUser = UserGroupInformation.getCurrentUser().getShortUserName(); - } catch (IOException e) { - throw ErrorTable.newError(ErrorTable.BAD_AUTHORIZATION, userArgs); - } - return - (user.compareToIgnoreCase(OzoneConsts.OZONE_SIMPLE_ROOT_USER) == 0) || - (user.compareToIgnoreCase(OzoneConsts.OZONE_SIMPLE_HDFS_USER) == 0) - || (user.compareToIgnoreCase(currentUser) == 0); - } - - /** - * Returns true if the request is Anonymous. - * - * @param userArgs - user Args - * - * @throws OzoneException -- Allows the underlying system - * to throw, that error will get propagated to clients - */ - @Override - public boolean isAnonymous(UserArgs userArgs) throws OzoneException { - assert userArgs != null : "userArgs cannot be null"; - - return getUser(userArgs) == null; - } - - /** - * Returns true if the name is a recognizable user in the system. - * - * @param userName - Name of the user - * @param userArgs - user Args - * - * @throws OzoneException -- Allows the underlying system - * to throw, that error will get propagated to clients - */ - @Override - public boolean isUser(String userName, UserArgs userArgs) - throws OzoneException { - // In the simple case, all non-null user names are users :) - return userName != null; - } - - /** - * Returns all the Groups that user is a member of. - * - * @param userArgs - User Args - * - * @return String Array which contains 0 or more group names - * - * @throws OzoneException - */ - @Override - public String[] getGroups(UserArgs userArgs) throws OzoneException { - // Not implemented - return null; - } - -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/userauth/package-info.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/userauth/package-info.java deleted file mode 100644 index d498fc8ebeb..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/userauth/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * User authentication implementations for the Ozone REST interface. - */ -package org.apache.hadoop.ozone.web.userauth; \ No newline at end of file diff --git a/hadoop-ozone/objectstore-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider b/hadoop-ozone/objectstore-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider deleted file mode 100644 index 2e103fea7b7..00000000000 --- a/hadoop-ozone/objectstore-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainerProvider diff --git a/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java b/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java deleted file mode 100644 index 291ffd32c2a..00000000000 --- a/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package org.apache.hadoop.ozone.web; - -import org.apache.hadoop.ozone.web.exceptions.ErrorTable; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.junit.Test; - -import static junit.framework.TestCase.assertEquals; -import static org.apache.hadoop.ozone.web.utils.OzoneUtils.getRequestID; - -/** - * Test Ozone Error Codes. - */ -public class TestErrorCode { - /** - * Test Error Generator functions. - */ - @Test - public void testErrorGen() { - OzoneException e = ErrorTable - .newError(ErrorTable.ACCESS_DENIED, getRequestID(), "/test/path", - "localhost"); - assertEquals("localhost", e.getHostID()); - assertEquals(ErrorTable.ACCESS_DENIED.getShortMessage(), - e.getShortMessage()); - } - - @Test - public void testErrorGenWithException() { - OzoneException e = - new OzoneException(ErrorTable.ACCESS_DENIED.getHttpCode(), - "short message", new Exception("Hello")); - assertEquals("short message", e.getShortMessage()); - assertEquals("Hello", e.getMessage()); - } -} diff --git a/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/package-info.java b/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/package-info.java deleted file mode 100644 index 4ebe859caa8..00000000000 --- a/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Tests the REST error codes. - */ -package org.apache.hadoop.ozone.web; \ No newline at end of file diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/ObjectPrinter.java similarity index 59% rename from hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/package-info.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/ObjectPrinter.java index ddbc30e09f2..064d30ad1c2 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/package-info.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/ObjectPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,25 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.web; +package org.apache.hadoop.ozone.web.ozShell; + +import java.io.IOException; + +import org.apache.hadoop.ozone.web.utils.JsonUtils; + /** - * Unit tests of generic ozone web app and rest utils. + * Utility to print out response object in human readable form. */ +public final class ObjectPrinter { + private ObjectPrinter() { + } + + public static String getObjectAsJson(Object o) throws IOException { + return JsonUtils.toJsonStringWithDefaultPrettyPrinter( + JsonUtils.toJsonString(o)); + } + + public static void printObjectAsJson(Object o) throws IOException { + System.out.println(getObjectAsJson(o)); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java index 2b54a7b1ba1..207b0b4f7ee 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java @@ -25,7 +25,6 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.rest.OzoneException; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_HTTP_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RPC_SCHEME; @@ -46,12 +45,12 @@ public class OzoneAddress { private String keyName = ""; - public OzoneAddress() throws OzoneException { + public OzoneAddress() throws OzoneClientException { this("o3:///"); } public OzoneAddress(String address) - throws OzoneException { + throws OzoneClientException { if (address == null || address.equals("")) { address = OZONE_RPC_SCHEME + ":///"; } @@ -87,17 +86,9 @@ public OzoneClient createClient(OzoneConfiguration conf) scheme = OZONE_RPC_SCHEME; } if (scheme.equals(OZONE_HTTP_SCHEME)) { - if (ozoneURI.getHost() != null && !ozoneURI.getAuthority() - .equals(EMPTY_HOST)) { - if (ozoneURI.getPort() == -1) { - client = 
OzoneClientFactory.getRestClient(ozoneURI.getHost()); - } else { - client = OzoneClientFactory - .getRestClient(ozoneURI.getHost(), ozoneURI.getPort(), conf); - } - } else { - client = OzoneClientFactory.getRestClient(conf); - } + throw new UnsupportedOperationException( + "REST scheme is not supported anymore. Please use the AWS S3 protocol " + "if you need a REST interface."); } else if (scheme.equals(OZONE_RPC_SCHEME)) { if (ozoneURI.getHost() != null && !ozoneURI.getAuthority() .equals(EMPTY_HOST)) { @@ -126,7 +117,7 @@ public OzoneClient createClient(OzoneConfiguration conf) - * @throws OzoneException + * @throws OzoneClientException */ protected URI parseURI(String uri) - throws OzoneException + throws OzoneClientException { if ((uri == null) || uri.isEmpty()) { throw new OzoneClientException( "Ozone URI is needed to execute this command."); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java index f9c35915eed..97d4ec7d1db 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java @@ -18,11 +18,14 @@ package org.apache.hadoop.ozone.web.ozShell.bucket; import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ozone.client.*; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -82,8 +85,7 @@ public Void call() throws Exception { if (isVerbose()) { OzoneBucket bucket = vol.getBucket(bucketName); - System.out.printf(JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString(OzoneClientUtils.asBucketInfo(bucket)))); + ObjectPrinter.printObjectAsJson(bucket); } return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java index c0f35f2d675..e5677a4b3d4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java @@ -19,12 +19,11 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientUtils; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; @@ -59,8 +58,8 @@ public Void call() throws Exception { OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
OzoneBucket bucket = vol.getBucket(bucketName); - System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString(OzoneClientUtils.asBucketInfo(bucket)))); + ObjectPrinter.printObjectAsJson(bucket); + return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java index 8bc0029f631..746c7274de3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java @@ -18,19 +18,15 @@ package org.apache.hadoop.ozone.web.ozShell.bucket; -import java.util.ArrayList; import java.util.Iterator; -import java.util.List; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientUtils; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.rest.response.BucketInfo; import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import picocli.CommandLine.Command; import picocli.CommandLine.Help.Visibility; @@ -85,20 +81,20 @@ public Void call() throws Exception { OzoneVolume vol = client.getObjectStore().getVolume(volumeName); Iterator bucketIterator = vol.listBuckets(prefix, startBucket); - List bucketList = new ArrayList<>(); + + int counter = 0; while (maxBuckets > 0 && bucketIterator.hasNext()) { - BucketInfo bucketInfo = - OzoneClientUtils.asBucketInfo(bucketIterator.next()); - bucketList.add(bucketInfo); + ObjectPrinter.printObjectAsJson(bucketIterator.next()); + maxBuckets -= 1; + counter++; } if (isVerbose()) { System.out.printf("Found : %d buckets for volume : %s ", - bucketList.size(), volumeName); + counter, volumeName); } - System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString(bucketList))); + return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java index fa345e3a343..afc3ece1c69 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java @@ -20,13 +20,12 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientUtils; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; @@ -64,8 +63,7 @@ public Void call() throws Exception { OzoneBucket bucket = vol.getBucket(bucketName); OzoneKeyDetails key = bucket.getKey(keyName); - 
System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString(OzoneClientUtils.asKeyInfoDetails(key)))); + ObjectPrinter.printObjectAsJson(key); return null; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java index 111ce1607c1..9829eefed00 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java @@ -18,20 +18,16 @@ package org.apache.hadoop.ozone.web.ozShell.keys; -import java.util.ArrayList; import java.util.Iterator; -import java.util.List; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientUtils; import org.apache.hadoop.ozone.client.OzoneKey; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.rest.response.KeyInfo; import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -88,27 +84,26 @@ public Void call() throws Exception { OzoneBucket bucket = vol.getBucket(bucketName); Iterator keyIterator = bucket.listKeys(prefix, startKey); - List keyInfos = new ArrayList<>(); int maxKeyLimit = maxKeys; + + int counter = 0; while (maxKeys > 0 && keyIterator.hasNext()) { - KeyInfo key = OzoneClientUtils.asKeyInfo(keyIterator.next()); - keyInfos.add(key); + OzoneKey ozoneKey = keyIterator.next(); + ObjectPrinter.printObjectAsJson(ozoneKey); maxKeys -= 1; + counter++; } // More keys were returned notify about max length if (keyIterator.hasNext()) { System.out.println("Listing first " + maxKeyLimit + " entries of the " + "result. 
Use --length (-l) to override max returned keys."); + } else if (isVerbose()) { + System.out.printf("Found : %d keys for bucket %s in volume : %s ", + counter, bucketName, volumeName); } - if (isVerbose()) { - System.out.printf("Found : %d keys for bucket %s in volume : %s ", - keyInfos.size(), bucketName, volumeName); - } - System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString(keyInfos))); return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java index 6feaeee20ab..ddd835069a6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java @@ -19,13 +19,12 @@ package org.apache.hadoop.ozone.web.ozShell.volume; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientUtils; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import org.apache.hadoop.security.UserGroupInformation; import picocli.CommandLine.Command; @@ -92,8 +91,7 @@ public Void call() throws Exception { if (isVerbose()) { OzoneVolume vol = client.getObjectStore().getVolume(volumeName); - System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString(OzoneClientUtils.asVolumeInfo(vol)))); + ObjectPrinter.printObjectAsJson(vol); } return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java index 9c5f8726f36..0d8723fc148 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java @@ -19,12 +19,11 @@ package org.apache.hadoop.ozone.web.ozShell.volume; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientUtils; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; @@ -52,8 +51,7 @@ public Void call() throws Exception { String volumeName = address.getVolumeName(); OzoneVolume vol = client.getObjectStore().getVolume(volumeName); - System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString(OzoneClientUtils.asVolumeInfo(vol)))); + ObjectPrinter.printObjectAsJson(vol); return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java index 474361756a4..a486fb1db22 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java @@ -18,20 +18,16 @@ package org.apache.hadoop.ozone.web.ozShell.volume; -import java.util.ArrayList; import java.util.Iterator; -import java.util.List; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientUtils; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.rest.response.VolumeInfo; import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; - import org.apache.hadoop.security.UserGroupInformation; + import picocli.CommandLine.Command; import picocli.CommandLine.Option; import picocli.CommandLine.Parameters; @@ -93,20 +89,19 @@ public Void call() throws Exception { volumeIterator = client.getObjectStore().listVolumes(prefix); } - List volumeInfos = new ArrayList<>(); - + int counter = 0; while (maxVolumes > 0 && volumeIterator.hasNext()) { - VolumeInfo volume = OzoneClientUtils.asVolumeInfo(volumeIterator.next()); - volumeInfos.add(volume); + OzoneVolume next = volumeIterator.next(); + ObjectPrinter.printObjectAsJson(next); maxVolumes -= 1; + counter++; } if (isVerbose()) { - System.out.printf("Found : %d volumes for user : %s ", volumeInfos.size(), + System.out.printf("Found : %d volumes for user : %s ", counter, userName); } - System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString(volumeInfos))); + return null; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java index 06f591b2852..7ddeae90c34 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java @@ -18,18 +18,17 @@ package org.apache.hadoop.ozone.web.ozShell.volume; -import picocli.CommandLine.Command; -import picocli.CommandLine.Option; -import picocli.CommandLine.Parameters; - import org.apache.hadoop.hdds.client.OzoneQuota; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.OzoneClientUtils; import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; + +import picocli.CommandLine.Command; +import picocli.CommandLine.Option; +import picocli.CommandLine.Parameters; /** * Executes update volume calls. 
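Every reworked handler above follows the same shape: instead of mapping client-side objects to the removed REST response beans, collecting them in a List, and emitting one pretty-printed JSON array, each handler now streams entries one at a time through the new ObjectPrinter. A minimal sketch of that shared loop; the wrapper class and printAll method here are illustrative only and not part of the patch, while ObjectPrinter and OzoneVolume are the classes the patch itself uses:

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter;

/** Illustrative sketch, not part of the patch: the streaming print loop. */
final class ListPrintSketch {
  private ListPrintSketch() {
  }

  static int printAll(Iterator<OzoneVolume> volumes, int max)
      throws IOException {
    int counter = 0;
    while (max > 0 && volumes.hasNext()) {
      // Emit one pretty-printed JSON document per entry instead of
      // buffering everything into a single JSON array.
      ObjectPrinter.printObjectAsJson(volumes.next());
      max -= 1;
      counter++;
    }
    return counter;
  }
}

The visible consequence in the hunks above is that the list commands now print a sequence of JSON documents rather than one array, so the verbose summary is fed by a plain counter instead of a List size.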
@@ -71,8 +70,7 @@ public Void call() throws Exception { volume.setOwner(ownerName); } - System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString(OzoneClientUtils.asVolumeInfo(volume)))); + ObjectPrinter.printObjectAsJson(volume); return null; } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestObjectPrinter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestObjectPrinter.java new file mode 100644 index 00000000000..82e755ccf7e --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestObjectPrinter.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.web.ozShell; + +import java.io.IOException; +import java.util.ArrayList; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; + +import org.junit.Assert; +import static org.junit.Assert.*; +import org.junit.Test; +import org.mockito.Mockito; + +/** + * Test the json object printer. 
+ */ +public class TestObjectPrinter { + + @Test + public void printObjectAsJson() throws IOException { + + OzoneConfiguration conf = new OzoneConfiguration(); + OzoneVolume volume = + new OzoneVolume(conf, Mockito.mock(ClientProtocol.class), "name", + "admin", "owner", 1L, 0L, + new ArrayList<>()); + + String result = ObjectPrinter.getObjectAsJson(volume); + Assert.assertTrue("Result is not valid JSON", + result.contains("\"owner\"")); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestOzoneAddress.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestOzoneAddress.java index 600cf30db7e..7ae052059be 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestOzoneAddress.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestOzoneAddress.java @@ -22,7 +22,7 @@ import java.util.Arrays; import java.util.Collection; -import org.apache.hadoop.ozone.client.rest.OzoneException; +import org.apache.hadoop.ozone.client.OzoneClientException; import org.junit.Assert; import org.junit.Test; @@ -42,9 +42,6 @@ public static Collection data() { {"o3fs://localhost:9878/"}, {"o3fs://localhost/"}, {"o3fs:///"}, - {"http://localhost:9878/"}, - {"http://localhost/"}, - {"http:///"}, {"/"}, {""} }); @@ -57,7 +54,7 @@ public TestOzoneAddress(String prefix) { } @Test - public void checkUrlTypes() throws OzoneException, IOException { + public void checkUrlTypes() throws OzoneClientException, IOException { OzoneAddress address; address = new OzoneAddress(""); diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml index 9654df7c009..d495cc61f98 100644 --- a/hadoop-ozone/ozonefs/pom.xml +++ b/hadoop-ozone/ozonefs/pom.xml @@ -137,11 +137,6 @@ - - org.apache.hadoop - hadoop-ozone-objectstore-service - provided - com.google.code.findbugs findbugs diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java index 913bb3ebd56..2e9e3a4d41e 100644 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java +++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java @@ -18,40 +18,36 @@ package org.apache.hadoop.fs.ozone; -import org.apache.commons.lang3.RandomStringUtils; +import java.io.IOException; +import java.util.Arrays; + import org.apache.hadoop.conf.StorageUnit; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.web.handlers.BucketArgs; -import org.apache.hadoop.ozone.web.handlers.UserArgs; -import 
org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.ozone.web.handlers.VolumeArgs; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; + +import org.apache.commons.lang3.RandomStringUtils; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; -import java.io.IOException; -import java.util.Arrays; - /** * Test OzoneFSInputStream by reading through multiple interfaces. */ public class TestOzoneFSInputStream { private static MiniOzoneCluster cluster = null; private static FileSystem fs; - private static StorageHandler storageHandler; private static Path filePath = null; private static byte[] data = null; @@ -71,22 +67,9 @@ public static void init() throws Exception { .setNumDatanodes(10) .build(); cluster.waitForClusterToBeReady(); - storageHandler = - new ObjectStoreHandler(conf).getStorageHandler(); // create a volume and a bucket to be used by OzoneFileSystem - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - UserArgs userArgs = new UserArgs(null, OzoneUtils.getRequestID(), - null, null, null, null); - VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs); - volumeArgs.setUserName(userName); - volumeArgs.setAdminName(adminName); - storageHandler.createVolume(volumeArgs); - BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs); - storageHandler.createBucket(bucketArgs); + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster); // Fetch the host and port for File System init DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0) @@ -94,7 +77,7 @@ public static void init() throws Exception { // Set the fs.defaultFS and start the filesystem String uri = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName); + OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName()); conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, uri); fs = FileSystem.get(conf); int fileLen = 100 * 1024 * 1024; @@ -111,7 +94,6 @@ public static void init() throws Exception { @AfterClass public static void shutdown() throws IOException { fs.close(); - storageHandler.close(); cluster.shutdown(); } diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java index ca3e64f18b9..2a7210103f6 100644 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java +++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java @@ -25,45 +25,40 @@ import java.util.Collection; import java.util.List; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; -import org.apache.commons.io.IOUtils; 
-import org.apache.commons.lang3.RandomStringUtils; -import org.junit.After; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.GlobalStorageStatistics; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.StorageStatistics; -import org.apache.hadoop.fs.GlobalStorageStatistics; -import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; -import org.apache.hadoop.ozone.web.handlers.BucketArgs; -import org.apache.hadoop.ozone.web.handlers.UserArgs; -import org.apache.hadoop.ozone.web.handlers.VolumeArgs; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; import static java.nio.charset.StandardCharsets.UTF_8; +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.RandomStringUtils; import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER; +import org.junit.After; +import org.junit.Assert; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; /** * Test OzoneFileSystem Interfaces. 
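These test setups all funnel through TestDataUtil.createVolumeAndBucket (and, later in the patch, TestDataUtil.createKey), the helper added under hadoop-ozone/integration-test. Its body is not visible in this part of the diff, so the following is a hedged reconstruction from the call sites alone; only the three signatures are taken from the patch, while the random-name scheme and the empty-content key writing are assumptions:

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneVolume;

import org.apache.commons.lang3.RandomStringUtils;

/** Sketch of TestDataUtil reconstructed from its call sites; details assumed. */
public final class TestDataUtil {
  private TestDataUtil() {
  }

  public static OzoneBucket createVolumeAndBucket(MiniOzoneCluster cluster)
      throws IOException {
    // Random names keep concurrently running tests apart, as the old
    // RandomStringUtils-based setup did.
    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
    return createVolumeAndBucket(cluster, volumeName, bucketName);
  }

  public static OzoneBucket createVolumeAndBucket(MiniOzoneCluster cluster,
      String volumeName, String bucketName) throws IOException {
    ObjectStore objectStore = cluster.getClient().getObjectStore();
    objectStore.createVolume(volumeName);
    OzoneVolume volume = objectStore.getVolume(volumeName);
    volume.createBucket(bucketName);
    return volume.getBucket(bucketName);
  }

  public static void createKey(OzoneBucket bucket, String keyName,
      String content) throws IOException {
    byte[] bytes = content.getBytes(StandardCharsets.UTF_8);
    try (OutputStream stream = bucket.createKey(keyName, bytes.length)) {
      stream.write(bytes);
    }
  }
}

Whatever the exact implementation, the payoff visible in these hunks is that roughly a dozen lines of UserArgs/VolumeArgs/BucketArgs plumbing per test collapse into a single call that hands back the OzoneBucket the test actually needs.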
@@ -93,17 +88,15 @@ public static Collection data() { private boolean useAbsolutePath; - private static MiniOzoneCluster cluster = null; + private MiniOzoneCluster cluster = null; - private static FileSystem fs; + private FileSystem fs; - private static OzoneFileSystem o3fs; + private OzoneFileSystem o3fs; - private static String volumeName; + private String volumeName; - private static String bucketName; - - private static StorageHandler storageHandler; + private String bucketName; private OzoneFSStorageStatistics statistics; @@ -118,27 +111,18 @@ public TestOzoneFileInterfaces(boolean setDefaultFs, @Before public void init() throws Exception { + volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase(); + bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase(); + OzoneConfiguration conf = new OzoneConfiguration(); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) .build(); cluster.waitForClusterToBeReady(); - storageHandler = - new ObjectStoreHandler(conf).getStorageHandler(); // create a volume and a bucket to be used by OzoneFileSystem - userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - volumeName = "volume" + RandomStringUtils.randomNumeric(5); - bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - UserArgs userArgs = new UserArgs(null, OzoneUtils.getRequestID(), - null, null, null, null); - VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs); - volumeArgs.setUserName(userName); - volumeArgs.setAdminName(adminName); - storageHandler.createVolume(volumeArgs); - BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs); - storageHandler.createBucket(bucketArgs); + OzoneBucket bucket = + TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName); rootPath = String .format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucketName, @@ -161,7 +145,6 @@ public void teardown() throws IOException { cluster.shutdown(); } IOUtils.closeQuietly(fs); - IOUtils.closeQuietly(storageHandler); } @Test diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java index ac8f11fc905..4b50a2a0f24 100644 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java @@ -18,40 +18,33 @@ package org.apache.hadoop.fs.ozone; -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.RandomStringUtils; +import java.io.IOException; +import java.util.Set; +import java.util.TreeSet; + import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.web.handlers.BucketArgs; -import org.apache.hadoop.ozone.web.handlers.KeyArgs; -import org.apache.hadoop.ozone.web.handlers.UserArgs; -import org.apache.hadoop.ozone.web.handlers.VolumeArgs; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.response.KeyInfo; 
-import org.apache.hadoop.ozone.web.utils.OzoneUtils; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClientException; +import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.test.GenericTestUtils; + +import org.apache.commons.io.IOUtils; import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; - -import java.io.IOException; -import java.util.Set; -import java.util.TreeSet; - -import org.junit.rules.Timeout; - import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; /** * Ozone file system tests that are not covered by contract tests. @@ -66,11 +59,9 @@ public class TestOzoneFileSystem { private static FileSystem fs; private static OzoneFileSystem o3fs; - private static StorageHandler storageHandler; - private static UserArgs userArgs; private String volumeName; private String bucketName; - private String userName; + private String rootPath; @Before @@ -80,25 +71,14 @@ public void init() throws Exception { .setNumDatanodes(3) .build(); cluster.waitForClusterToBeReady(); - storageHandler = - new ObjectStoreHandler(conf).getStorageHandler(); // create a volume and a bucket to be used by OzoneFileSystem - userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - volumeName = "volume" + RandomStringUtils.randomNumeric(5); - bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - userArgs = new UserArgs(null, OzoneUtils.getRequestID(), - null, null, null, null); - VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs); - volumeArgs.setUserName(userName); - volumeArgs.setAdminName(adminName); - storageHandler.createVolume(volumeArgs); - BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs); - storageHandler.createBucket(bucketArgs); + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster); + volumeName = bucket.getVolumeName(); + bucketName = bucket.getName(); rootPath = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName); + OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName()); // Set the fs.defaultFS and start the filesystem conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); @@ -112,7 +92,6 @@ public void teardown() throws IOException { cluster.shutdown(); } IOUtils.closeQuietly(fs); - IOUtils.closeQuietly(storageHandler); } @Test @@ -129,8 +108,8 @@ public void testCreateDoesNotAddParentDirKeys() throws Exception { Path child = new Path(parent, "child"); ContractTestUtils.touch(fs, child); - KeyInfo key = getKey(child, false); - assertEquals(key.getKeyName(), o3fs.pathToKey(child)); + OzoneKeyDetails key = getKey(child, false); + assertEquals(key.getName(), o3fs.pathToKey(child)); // Creating a child should not add parent keys to the bucket try { @@ -167,8 +146,8 @@ public void testDeleteCreatesFakeParentDir() throws Exception { // Deleting the only child should create the parent dir key if it does // not exist String parentKey = o3fs.pathToKey(parent) + "/"; - KeyInfo parentKeyInfo = getKey(parent, true); - assertEquals(parentKey, parentKeyInfo.getKeyName()); + OzoneKeyDetails parentKeyInfo = getKey(parent, true); + assertEquals(parentKey, parentKeyInfo.getName()); } @Test @@ 
-285,15 +264,14 @@ public void testListStatusOnSubDirs() throws Exception { fileStatus2.equals(dir12.toString())); } - private KeyInfo getKey(Path keyPath, boolean isDirectory) - throws IOException, OzoneException { + private OzoneKeyDetails getKey(Path keyPath, boolean isDirectory) + throws IOException, OzoneClientException { String key = o3fs.pathToKey(keyPath); if (isDirectory) { key = key + "/"; } - KeyArgs parentKeyArgs = new KeyArgs(volumeName, bucketName, key, - userArgs); - return storageHandler.getKeyInfo(parentKeyArgs); + return cluster.getClient().getObjectStore().getVolume(volumeName) + .getBucket(bucketName).getKey(key); } private void assertKeyNotFoundException(IOException ex) { diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java index 0c639e7dccd..1d584651bf9 100644 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java +++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java @@ -18,30 +18,25 @@ package org.apache.hadoop.fs.ozone; -import org.apache.commons.lang3.RandomStringUtils; +import java.io.IOException; + import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.web.handlers.BucketArgs; -import org.apache.hadoop.ozone.web.handlers.UserArgs; -import org.apache.hadoop.ozone.web.handlers.VolumeArgs; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; + import org.junit.After; +import static org.junit.Assert.assertTrue; import org.junit.Before; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; - -import static org.junit.Assert.assertTrue; - /** * Unit Test for verifying directory rename operation through OzoneFS. 
*/ @@ -51,7 +46,6 @@ public class TestOzoneFsRenameDir { private MiniOzoneCluster cluster = null; private OzoneConfiguration conf = null; - private static StorageHandler storageHandler; private static FileSystem fs; @Before @@ -61,22 +55,9 @@ public void init() throws Exception { .setNumDatanodes(1) .build(); cluster.waitForClusterToBeReady(); - storageHandler = - new ObjectStoreHandler(conf).getStorageHandler(); // create a volume and a bucket to be used by OzoneFileSystem - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - UserArgs userArgs = new UserArgs(null, OzoneUtils.getRequestID(), - null, null, null, null); - VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs); - volumeArgs.setUserName(userName); - volumeArgs.setAdminName(adminName); - storageHandler.createVolume(volumeArgs); - BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs); - storageHandler.createBucket(bucketArgs); + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster); // Fetch the host and port for File System init DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0) @@ -84,7 +65,7 @@ public void init() throws Exception { // Set the fs.defaultFS and start the filesystem String uri = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName); + OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName()); conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, uri); fs = FileSystem.get(conf); LOG.info("fs.defaultFS=" + fs.getUri()); diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java index b63e182a1e7..56d63ac2f25 100644 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java +++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java @@ -18,26 +18,21 @@ package org.apache.hadoop.fs.ozone.contract; -import org.apache.commons.lang3.RandomStringUtils; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.web.handlers.BucketArgs; -import org.apache.hadoop.ozone.web.handlers.UserArgs; -import org.apache.hadoop.ozone.web.handlers.VolumeArgs; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.junit.Assert; -import java.io.IOException; +import org.junit.Assert; /** * The contract of Ozone: only enabled if the test bucket is provided. 
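The filesystem bootstrap repeated in the tests above, and again in the remaining OzoneContract hunks below, always takes the same shape: format an o3fs URI from the freshly created bucket, set it as fs.defaultFS, and let FileSystem.get resolve OzoneFileSystem. Pulled out here as a self-contained sketch; the wrapper class and method names are illustrative only, everything else mirrors the patch:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.OzoneBucket;

/** Illustrative sketch, not part of the patch: the shared o3fs bootstrap. */
final class OzoneFsBootstrapSketch {
  private OzoneFsBootstrapSketch() {
  }

  static FileSystem openFileSystem(Configuration conf, OzoneBucket bucket)
      throws IOException {
    // o3fs://<bucket>.<volume>/ mounts a single bucket as the root.
    String uri = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME,
        bucket.getName(), bucket.getVolumeName());
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, uri);
    return FileSystem.get(conf);
  }
}

Setting fs.defaultFS to the bucket-qualified URI is what lets the otherwise unmodified FileSystem.get(conf) calls in these tests land on OzoneFileSystem.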
@@ -45,7 +40,6 @@ class OzoneContract extends AbstractFSContract { private static MiniOzoneCluster cluster; - private static StorageHandler storageHandler; private static final String CONTRACT_XML = "contract/ozone.xml"; OzoneContract(Configuration conf) { @@ -75,7 +69,6 @@ public static void createCluster() throws IOException { } catch (Exception e) { throw new IOException(e); } - storageHandler = new ObjectStoreHandler(conf).getStorageHandler(); } private void copyClusterConfigs(String configKey) { @@ -87,27 +80,10 @@ public FileSystem getTestFileSystem() throws IOException { //assumes cluster is not null Assert.assertNotNull("cluster not created", cluster); - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - - - UserArgs userArgs = new UserArgs(null, OzoneUtils.getRequestID(), - null, null, null, null); - VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs); - volumeArgs.setUserName(userName); - volumeArgs.setAdminName(adminName); - BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs); - try { - storageHandler.createVolume(volumeArgs); - storageHandler.createBucket(bucketArgs); - } catch (OzoneException e) { - throw new IOException(e.getMessage()); - } + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster); String uri = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName); + OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName()); getConf().set("fs.defaultFS", uri); copyClusterConfigs(OMConfigKeys.OZONE_OM_ADDRESS_KEY); copyClusterConfigs(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 967303e5a55..9d00bc4d36e 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -46,7 +46,6 @@ ozonefs-lib-legacy tools integration-test - objectstore-service datanode s3gateway dist @@ -82,11 +81,6 @@ hadoop-ozone-ozone-manager ${ozone.version} - - org.apache.hadoop - hadoop-ozone-objectstore-service - ${ozone.version} - org.apache.hadoop hadoop-ozone-s3gateway diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java index 17cf7bca5a3..252d87b307c 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java @@ -17,31 +17,30 @@ */ package org.apache.hadoop.ozone.s3; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.mockito.Mockito; - import javax.ws.rs.container.ContainerRequestContext; import javax.ws.rs.core.MultivaluedHashMap; import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.core.UriInfo; - import java.io.IOException; import java.net.URI; import java.util.Arrays; import java.util.Collection; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import 
org.apache.hadoop.test.LambdaTestUtils; + import static org.apache.hadoop.ozone.s3.AWSAuthParser.AUTHORIZATION_HEADER; import static org.apache.hadoop.ozone.s3.AWSAuthParser.CONTENT_MD5; import static org.apache.hadoop.ozone.s3.AWSAuthParser.CONTENT_TYPE; import static org.apache.hadoop.ozone.s3.AWSAuthParser.HOST_HEADER; import static org.apache.hadoop.ozone.s3.AWSAuthParser.X_AMAZ_DATE; import static org.apache.hadoop.ozone.s3.AWSAuthParser.X_AMZ_CONTENT_SHA256; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.mockito.Mockito; /** * Test class for @{@link OzoneClientProducer}. @@ -87,8 +86,8 @@ public TestOzoneClientProducer(String authHeader, String contentMd5, @Test public void testGetClientFailure() throws Exception { - LambdaTestUtils.intercept(IOException.class, "Couldn't create" + - " protocol ", () -> producer.createClient()); + LambdaTestUtils.intercept(IOException.class, "Couldn't create", + () -> producer.createClient()); } private void setupContext() throws Exception { @@ -143,4 +142,4 @@ public static Collection data() { }); } -} \ No newline at end of file +} diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java index 88fcb7b687f..2cdbf0d97cc 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java @@ -16,28 +16,7 @@ */ package org.apache.hadoop.ozone.om; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.scm.cli.SQLCLI; -import org.apache.hadoop.ozone.web.handlers.BucketArgs; -import org.apache.hadoop.ozone.web.handlers.KeyArgs; -import org.apache.hadoop.ozone.web.handlers.UserArgs; -import org.apache.hadoop.ozone.web.handlers.VolumeArgs; -import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - import java.io.IOException; -import java.io.OutputStream; import java.nio.file.Files; import java.nio.file.Paths; import java.sql.Connection; @@ -52,11 +31,26 @@ import java.util.List; import java.util.UUID; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.scm.cli.SQLCLI; +import org.apache.hadoop.test.GenericTestUtils; + import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; +import org.junit.After; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; /** * This class tests the CLI that transforms om.db into 
SQLite DB files. @@ -64,8 +58,7 @@ @RunWith(Parameterized.class) public class TestOmSQLCli { private MiniOzoneCluster cluster = null; - private StorageHandler storageHandler; - private UserArgs userArgs; + private OzoneConfiguration conf; private SQLCLI cli; @@ -108,48 +101,17 @@ public void setup() throws Exception { conf = new OzoneConfiguration(); cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); - storageHandler = new ObjectStoreHandler(conf).getStorageHandler(); - userArgs = new UserArgs(null, OzoneUtils.getRequestID(), - null, null, null, null); - cluster.waitForClusterToBeReady(); + OzoneBucket bucket0 = + TestDataUtil.createVolumeAndBucket(cluster, volumeName0, bucketName0); + OzoneBucket bucket1 = + TestDataUtil.createVolumeAndBucket(cluster, volumeName1, bucketName1); + OzoneBucket bucket2 = + TestDataUtil.createVolumeAndBucket(cluster, volumeName0, bucketName2); - VolumeArgs createVolumeArgs0 = new VolumeArgs(volumeName0, userArgs); - createVolumeArgs0.setUserName(userName); - createVolumeArgs0.setAdminName(adminName); - storageHandler.createVolume(createVolumeArgs0); - VolumeArgs createVolumeArgs1 = new VolumeArgs(volumeName1, userArgs); - createVolumeArgs1.setUserName(userName); - createVolumeArgs1.setAdminName(adminName); - storageHandler.createVolume(createVolumeArgs1); - - BucketArgs bucketArgs0 = new BucketArgs(volumeName0, bucketName0, userArgs); - storageHandler.createBucket(bucketArgs0); - BucketArgs bucketArgs1 = new BucketArgs(volumeName1, bucketName1, userArgs); - storageHandler.createBucket(bucketArgs1); - BucketArgs bucketArgs2 = new BucketArgs(volumeName0, bucketName2, userArgs); - storageHandler.createBucket(bucketArgs2); - - KeyArgs keyArgs0 = - new KeyArgs(volumeName0, bucketName0, keyName0, userArgs); - keyArgs0.setSize(100); - KeyArgs keyArgs1 = - new KeyArgs(volumeName1, bucketName1, keyName1, userArgs); - keyArgs1.setSize(200); - KeyArgs keyArgs2 = - new KeyArgs(volumeName0, bucketName2, keyName2, userArgs); - keyArgs2.setSize(300); - KeyArgs keyArgs3 = - new KeyArgs(volumeName0, bucketName2, keyName3, userArgs); - keyArgs3.setSize(400); - - OutputStream stream = storageHandler.newKeyWriter(keyArgs0); - stream.close(); - stream = storageHandler.newKeyWriter(keyArgs1); - stream.close(); - stream = storageHandler.newKeyWriter(keyArgs2); - stream.close(); - stream = storageHandler.newKeyWriter(keyArgs3); - stream.close(); + TestDataUtil.createKey(bucket0, keyName0, ""); + TestDataUtil.createKey(bucket1, keyName1, ""); + TestDataUtil.createKey(bucket2, keyName2, ""); + TestDataUtil.createKey(bucket2, keyName3, ""); cluster.getOzoneManager().stop(); cluster.getStorageContainerManager().stop();